repo_name | path | copies | size | content | license
---|---|---|---|---|---|
gertingold/scipy | doc/source/tutorial/examples/optimize_global_1.py | 15 | 1752 | import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
def eggholder(x):
return (-(x[1] + 47) * np.sin(np.sqrt(abs(x[0]/2 + (x[1] + 47))))
-x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47)))))
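# For reference: the eggholder function is a standard global-optimization benchmark;
# its global minimum is approximately f(512, 404.2319) = -959.6407, which is what the
# optimizers below are trying to locate.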
bounds = [(-512, 512), (-512, 512)]
x = np.arange(-512, 513)
y = np.arange(-512, 513)
xgrid, ygrid = np.meshgrid(x, y)
xy = np.stack([xgrid, ygrid])
results = dict()
results['shgo'] = optimize.shgo(eggholder, bounds)
results['DA'] = optimize.dual_annealing(eggholder, bounds)
results['DE'] = optimize.differential_evolution(eggholder, bounds)
results['BH'] = optimize.basinhopping(eggholder, bounds)
results['shgo_sobol'] = optimize.shgo(eggholder, bounds, n=200, iters=5,
sampling_method='sobol')
fig = plt.figure(figsize=(4.5, 4.5))
ax = fig.add_subplot(111)
im = ax.imshow(eggholder(xy), interpolation='bilinear', origin='lower',
cmap='gray')
ax.set_xlabel('x')
ax.set_ylabel('y')
def plot_point(res, marker='o', color=None):
ax.plot(512+res.x[0], 512+res.x[1], marker=marker, color=color, ms=10)
plot_point(results['BH'], color='y') # basinhopping - yellow
plot_point(results['DE'], color='c') # differential_evolution - cyan
plot_point(results['DA'], color='w') # dual_annealing. - white
# SHGO produces multiple minima, plot them all (with a smaller marker size)
plot_point(results['shgo'], color='r', marker='+')
plot_point(results['shgo_sobol'], color='r', marker='x')
for i in range(results['shgo_sobol'].xl.shape[0]):
ax.plot(512 + results['shgo_sobol'].xl[i, 0],
512 + results['shgo_sobol'].xl[i, 1],
'ro', ms=2)
ax.set_xlim([-4, 514*2])
ax.set_ylim([-4, 514*2])
fig.tight_layout()
plt.show()
| bsd-3-clause |
camallen/aggregation | experimental/serengeti/IAAI/weight.py | 2 | 1437 | #!/usr/bin/env python
import csv
#ASG000pt52,merxator wildebeest
photos = {}
beta = 1
def weight(TP,TN,FP,FN):
if (TP+beta*TN + FP+FN) == 0:
return -1
return (TP+beta*TN)/float(TP+beta*TN + FP+FN)
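# Quick illustrative check of the weight formula (hypothetical counts, not taken from
# the data below): with beta=1, weight(TP=8, TN=80, FP=5, FN=7) gives
# (8 + 80) / float(8 + 80 + 5 + 7) = 0.88, i.e. the fraction of photos on which the
# volunteer agrees with the gold standard.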
searchFor = "zebra"
with open("/home/greg/Databases/goldMergedSerengeti.csv") as f:
reader = csv.reader(f,delimiter="\t")
for meta,speciesList in reader:
photoID,userID = meta.split(",")
animals = [s.split(":")[0] for s in speciesList.split(",")]
if userID == "pjeversman":
if searchFor in animals:
photos[photoID] = True
else:
photos[photoID] = False
TP = 0.
TN = 0.
FP = 0.
FN = 0.
weightValues = []
with open("/home/greg/Downloads/Expert_Classifications_For_4149_S4_Captures.csv") as f:
reader = csv.reader(f)
next(reader, None)
for photoID,image,count,s1,s2,s3 in reader:
if photoID in photos:
if (searchFor in [s1,s2,s3]):
if photos[photoID]:
TP += 1
else:
FN += 1
else:
if photos[photoID]:
FP += 1
else:
TN += 1
weightValues.append(weight(TP,TN,FP,FN))
print TP,TN,FP,FN
print photos
import matplotlib.pyplot as plt
plt.plot(range(len(weightValues)),weightValues)
plt.ylim(0.5,1.1)
plt.xlabel(str(beta))
plt.show() | apache-2.0 |
DonBeo/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, then because the
number of samples is small we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, which
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso, which sets the sparsity of the model,
is set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
HolgerPeters/scikit-learn | examples/cluster/plot_face_ward_segmentation.py | 71 | 2460 | """
=========================================================================
A demo of structured Ward hierarchical clustering on a raccoon face image
=========================================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained so that
each segmented region is a single connected piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because the SciPy version is earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
###############################################################################
# Generate data
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
# Resize it to 10% of the original size to speed up the processing
face = sp.misc.imresize(face, 0.10) / 255.
X = np.reshape(face, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*face.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward',
connectivity=connectivity)
ward.fit(X)
label = np.reshape(ward.labels_, face.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(face, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
moutai/scikit-learn | sklearn/preprocessing/__init__.py | 268 | 1319 | """
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'FunctionTransformer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
| bsd-3-clause |
potash/scikit-learn | sklearn/cross_decomposition/pls_.py | 35 | 30767 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
from distutils.version import LooseVersion
from sklearn.utils.extmath import svd_flip
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
import scipy
pinv2_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
# check_finite=False is an optimization available only in scipy >=0.12
pinv2_args = {'check_finite': False}
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the power method for determining the eigenvectors and
eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
# We use slower pinv2 (same as np.linalg.pinv) for stability
# reasons
X_pinv = linalg.pinv2(X, **pinv2_args)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# If y_score only has zeros x_weights will only have zeros. In
# this case add an epsilon to converge to a more acceptable
# solution
if np.dot(x_weights.T, x_weights) < eps:
x_weights += eps
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv2(Y, **pinv2_args) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
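# Illustrative example (hypothetical numbers): for a column with mean 3.0 and standard
# deviation 2.0, scale=True maps the value 5.0 to (5.0 - 3.0) / 2.0 = 1.0; with
# scale=False only the mean is removed, giving 2.0.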
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm; the constructor's parameters
allow one to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
(i) The outer loop iterates over components.
(ii) The inner loop estimates the weight vectors. This can be done
with two algorithms: (a) the inner loop of the original NIPALS algorithm, or (b) an
SVD on residual cross-covariance matrices.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value of True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contain the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (
_center_scale_xy(X, Y, self.scale))
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# Forces sign stability of x_weights and y_weights
# Sign undeterminacy issue from svd if algorithm == "svd"
# and from platform dependent computation if algorithm == 'nipals'
x_weights, y_weights = svd_flip(x_weights, y_weights.T)
y_weights = y_weights.T
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
# A possible memory footprint reduction may be done here: in order to
# avoid the allocation of a data chunk for the rank-one
# approximations matrix which is then subtracted from Xk, we suggest
# performing a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
# U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv2(np.dot(self.x_loadings_.T, self.x_weights_),
**pinv2_args))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv2(np.dot(self.y_loadings_.T, self.y_weights_),
**pinv2_args))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy, dtype=FLOAT_DTYPES)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in the case of a one-dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm, default 1e-06.
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value of True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
Matrices::
T: x_scores_
U: y_scores_
W: x_weights_
C: y_weights_
P: x_loadings_
Q: y_loadings_
Are computed such that::
X = T P.T + Err and Y = U Q.T + Err
T[:, k] = Xk W[:, k] for k in range(n_components)
U[:, k] = Yk C[:, k] for k in range(n_components)
x_rotations_ = W (P.T W)^(-1)
y_rotations_ = C (Q.T C)^(-1)
where Xk and Yk are residual matrices at iteration k.
`Slides explaining PLS <http://www.eigenvector.com/Docs/Wise_pls_properties.pdf>`
For each component k, find weights u, v that optimizes:
``max corr(Xk u, Yk v) * std(Xk u) std(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
This implementation provides the same results as 3 PLS packages
provided in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
super(PLSRegression, self).__init__(
n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred to as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value of True unless you don't care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
Matrices::
T: x_scores_
U: y_scores_
W: x_weights_
C: y_weights_
P: x_loadings_
Q: y_loadings_
Are computed such that::
X = T P.T + Err and Y = U Q.T + Err
T[:, k] = Xk W[:, k] for k in range(n_components)
U[:, k] = Yk C[:, k] for k in range(n_components)
x_rotations_ = W (P.T W)^(-1)
y_rotations_ = C (Q.T C)^(-1)
where Xk and Yk are residual matrices at iteration k.
`Slides explaining PLS <http://www.eigenvector.com/Docs/Wise_pls_properties.pdf>`
For each component k, find weights u, v that optimize::
max corr(Xk u, Yk v) * std(Xk u) std(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
regression, which is slightly different from CCA. It is mostly used
for modeling.
This implementation provides the same results as the "plspm" package
provided in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
lies in the fact that the mixOmics implementation does not exactly implement
the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
super(PLSCanonical, self).__init__(
n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
Simply perform an SVD on the cross-covariance matrix X'Y.
There is no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contain the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (
_center_scale_xy(X, Y, self.scale))
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another solver. Else,
# let's use arpack's svds to compute only the components of interest.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
# Deterministic output
U, V = svd_flip(U, V)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
| bsd-3-clause |
kazemakase/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 234 | 9928 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
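# Sanity check of the assertion above: for very large n and n_subsamples=2 the
# breakdown point tends to 1 - 1/sqrt(2), roughly 0.293, i.e. about 29% of the samples
# can be outliers before the estimator breaks down.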
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exact the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
mmilutinovic1313/zipline-with-algorithms | zipline/finance/risk/cumulative.py | 3 | 18852 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logbook
import math
import numpy as np
from zipline.finance import trading
import zipline.utils.math_utils as zp_math
import pandas as pd
from pandas.tseries.tools import normalize_date
from six import iteritems
from . risk import (
alpha,
check_entry,
choose_treasury,
downside_risk,
sharpe_ratio,
sortino_ratio,
)
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
log = logbook.Logger('Risk Cumulative')
choose_treasury = functools.partial(choose_treasury, lambda *args: '10year',
compound=False)
def information_ratio(algo_volatility, algorithm_return, benchmark_return):
"""
http://en.wikipedia.org/wiki/Information_ratio
Args:
algo_volatility (float):
Annualized volatility of the algorithm's returns.
algorithm_return (float):
Annualized return of the algorithm over its lifetime.
benchmark_return (float):
Annualized return of the benchmark over the same period.
Returns:
float. Information ratio.
"""
if zp_math.tolerant_equals(algo_volatility, 0):
return np.nan
# The square of the annualization factor is in the volatility,
# because the volatility is also annualized,
# i.e. the sqrt(annual factor) is in the volatility's numerator.
# So to have the correct annualization factor for the
# Sharpe value's numerator, which should be the sqrt(annual factor).
# The square of the sqrt of the annual factor, i.e. the annual factor
# itself, is needed in the numerator to factor out the division by
# its square root.
return (algorithm_return - benchmark_return) / algo_volatility
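# Worked example (hypothetical inputs): with an annualized algorithm return of 0.12,
# a benchmark return of 0.08 and an algorithm volatility of 0.10,
# information_ratio(0.10, 0.12, 0.08) evaluates to (0.12 - 0.08) / 0.10, i.e. roughly 0.4.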
class RiskMetricsCumulative(object):
"""
:Usage:
Instantiate RiskMetricsCumulative once.
Call update() method on each dt to update the metrics.
"""
METRIC_NAMES = (
'alpha',
'beta',
'sharpe',
'algorithm_volatility',
'benchmark_volatility',
'downside_risk',
'sortino',
'information',
)
def __init__(self, sim_params,
returns_frequency=None,
create_first_day_stats=False,
account=None):
"""
- @returns_frequency allows for configuration of whether
the benchmark and algorithm returns are in units of minutes or days;
if `None`, defaults to the `emission_rate` in `sim_params`.
"""
self.treasury_curves = trading.environment.treasury_curves
self.start_date = sim_params.period_start.replace(
hour=0, minute=0, second=0, microsecond=0
)
self.end_date = sim_params.period_end.replace(
hour=0, minute=0, second=0, microsecond=0
)
self.trading_days = trading.environment.days_in_range(
self.start_date,
self.end_date)
# Hold on to the trading day before the start,
# used for index of the zero return value when forcing returns
# on the first day.
self.day_before_start = self.start_date - \
trading.environment.trading_days.freq
last_day = normalize_date(sim_params.period_end)
if last_day not in self.trading_days:
last_day = pd.tseries.index.DatetimeIndex(
[last_day]
)
self.trading_days = self.trading_days.append(last_day)
self.sim_params = sim_params
self.create_first_day_stats = create_first_day_stats
if returns_frequency is None:
returns_frequency = self.sim_params.emission_rate
self.returns_frequency = returns_frequency
if returns_frequency == 'daily':
cont_index = self.get_daily_index()
elif returns_frequency == 'minute':
cont_index = self.get_minute_index(sim_params)
self.cont_index = cont_index
self.algorithm_returns_cont = pd.Series(index=cont_index)
self.benchmark_returns_cont = pd.Series(index=cont_index)
self.algorithm_cumulative_leverages_cont = pd.Series(index=cont_index)
self.mean_returns_cont = pd.Series(index=cont_index)
self.annualized_mean_returns_cont = pd.Series(index=cont_index)
self.mean_benchmark_returns_cont = pd.Series(index=cont_index)
self.annualized_mean_benchmark_returns_cont = pd.Series(
index=cont_index)
# The returns at a given time are read and reset from the respective
# returns container.
self.algorithm_returns = None
self.benchmark_returns = None
self.mean_returns = None
self.annualized_mean_returns = None
self.mean_benchmark_returns = None
self.annualized_mean_benchmark_returns = None
self.algorithm_cumulative_returns = pd.Series(index=cont_index)
self.benchmark_cumulative_returns = pd.Series(index=cont_index)
self.algorithm_cumulative_leverages = pd.Series(index=cont_index)
self.excess_returns = pd.Series(index=cont_index)
self.latest_dt = cont_index[0]
self.metrics = pd.DataFrame(index=cont_index,
columns=self.METRIC_NAMES,
dtype=float)
self.drawdowns = pd.Series(index=cont_index)
self.max_drawdowns = pd.Series(index=cont_index)
self.max_drawdown = 0
self.max_leverages = pd.Series(index=cont_index)
self.max_leverage = 0
self.current_max = -np.inf
self.daily_treasury = pd.Series(index=self.trading_days)
self.treasury_period_return = np.nan
self.num_trading_days = 0
def get_minute_index(self, sim_params):
"""
Stitches together multiple days' worth of business minutes into
one continuous index.
"""
trading_minutes = None
for day in self.trading_days:
minutes_for_day = trading.environment.market_minutes_for_day(day)
if trading_minutes is None:
# Create container for all minutes on first iteration
trading_minutes = minutes_for_day
else:
trading_minutes = trading_minutes.union(minutes_for_day)
return trading_minutes
def get_daily_index(self):
return self.trading_days
def update(self, dt, algorithm_returns, benchmark_returns, account):
# Keep track of latest dt for use in to_dict and other methods
# that report current state.
self.latest_dt = dt
self.algorithm_returns_cont[dt] = algorithm_returns
self.algorithm_returns = self.algorithm_returns_cont[:dt]
self.num_trading_days = len(self.algorithm_returns)
if self.create_first_day_stats:
if len(self.algorithm_returns) == 1:
self.algorithm_returns = pd.Series(
{self.day_before_start: 0.0}).append(
self.algorithm_returns)
self.algorithm_cumulative_returns[dt] = \
self.calculate_cumulative_returns(self.algorithm_returns)
algo_cumulative_returns_to_date = \
self.algorithm_cumulative_returns[:dt]
self.mean_returns_cont[dt] = \
algo_cumulative_returns_to_date[dt] / self.num_trading_days
self.mean_returns = self.mean_returns_cont[:dt]
self.annualized_mean_returns_cont[dt] = \
self.mean_returns_cont[dt] * 252
self.annualized_mean_returns = self.annualized_mean_returns_cont[:dt]
if self.create_first_day_stats:
if len(self.mean_returns) == 1:
self.mean_returns = pd.Series(
{self.day_before_start: 0.0}).append(self.mean_returns)
self.annualized_mean_returns = pd.Series(
{self.day_before_start: 0.0}).append(
self.annualized_mean_returns)
self.benchmark_returns_cont[dt] = benchmark_returns
self.benchmark_returns = self.benchmark_returns_cont[:dt]
if self.create_first_day_stats:
if len(self.benchmark_returns) == 1:
self.benchmark_returns = pd.Series(
{self.day_before_start: 0.0}).append(
self.benchmark_returns)
self.benchmark_cumulative_returns[dt] = \
self.calculate_cumulative_returns(self.benchmark_returns)
benchmark_cumulative_returns_to_date = \
self.benchmark_cumulative_returns[:dt]
self.mean_benchmark_returns_cont[dt] = \
benchmark_cumulative_returns_to_date[dt] / self.num_trading_days
self.mean_benchmark_returns = self.mean_benchmark_returns_cont[:dt]
self.annualized_mean_benchmark_returns_cont[dt] = \
self.mean_benchmark_returns_cont[dt] * 252
self.annualized_mean_benchmark_returns = \
self.annualized_mean_benchmark_returns_cont[:dt]
self.algorithm_cumulative_leverages_cont[dt] = account['leverage']
self.algorithm_cumulative_leverages = \
self.algorithm_cumulative_leverages_cont[:dt]
if self.create_first_day_stats:
if len(self.algorithm_cumulative_leverages) == 1:
self.algorithm_cumulative_leverages = pd.Series(
{self.day_before_start: 0.0}).append(
self.algorithm_cumulative_leverages)
if not self.algorithm_returns.index.equals(
self.benchmark_returns.index
):
message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end} on {dt}"
message = message.format(
bm_count=len(self.benchmark_returns),
algo_count=len(self.algorithm_returns),
start=self.start_date,
end=self.end_date,
dt=dt
)
raise Exception(message)
self.update_current_max()
self.metrics.benchmark_volatility[dt] = \
self.calculate_volatility(self.benchmark_returns)
self.metrics.algorithm_volatility[dt] = \
self.calculate_volatility(self.algorithm_returns)
# caching the treasury rates for the minutely case is a
# big speedup, because it avoids searching the treasury
# curves on every minute.
# In both minutely and daily, the daily curve is always used.
treasury_end = dt.replace(hour=0, minute=0)
if np.isnan(self.daily_treasury[treasury_end]):
treasury_period_return = choose_treasury(
self.treasury_curves,
self.start_date,
treasury_end
)
self.daily_treasury[treasury_end] = treasury_period_return
self.treasury_period_return = self.daily_treasury[treasury_end]
self.excess_returns[self.latest_dt] = (
self.algorithm_cumulative_returns[self.latest_dt] -
self.treasury_period_return)
self.metrics.beta[dt] = self.calculate_beta()
self.metrics.alpha[dt] = self.calculate_alpha()
self.metrics.sharpe[dt] = self.calculate_sharpe()
self.metrics.downside_risk[dt] = self.calculate_downside_risk()
self.metrics.sortino[dt] = self.calculate_sortino()
self.metrics.information[dt] = self.calculate_information()
self.max_drawdown = self.calculate_max_drawdown()
self.max_drawdowns[dt] = self.max_drawdown
self.max_leverage = self.calculate_max_leverage()
self.max_leverages[dt] = self.max_leverage
def to_dict(self):
"""
Creates a dictionary representing the state of the risk report.
Returns a dict object of the form:
"""
dt = self.latest_dt
period_label = dt.strftime("%Y-%m")
rval = {
'trading_days': self.num_trading_days,
'benchmark_volatility': self.metrics.benchmark_volatility[dt],
'algo_volatility': self.metrics.algorithm_volatility[dt],
'treasury_period_return': self.treasury_period_return,
# Though the two following keys say period return,
# they would be more accurately called the cumulative return.
# However, the keys need to stay the same, for now, for backwards
# compatibility with existing consumers.
'algorithm_period_return': self.algorithm_cumulative_returns[dt],
'benchmark_period_return': self.benchmark_cumulative_returns[dt],
'beta': self.metrics.beta[dt],
'alpha': self.metrics.alpha[dt],
'sharpe': self.metrics.sharpe[dt],
'sortino': self.metrics.sortino[dt],
'information': self.metrics.information[dt],
'excess_return': self.excess_returns[dt],
'max_drawdown': self.max_drawdown,
'max_leverage': self.max_leverage,
'period_label': period_label
}
return {k: (None if check_entry(k, v) else v)
for k, v in iteritems(rval)}
def __repr__(self):
statements = []
for metric in self.METRIC_NAMES:
value = getattr(self.metrics, metric)[-1]
if isinstance(value, list):
if len(value) == 0:
value = np.nan
else:
value = value[-1]
statements.append("{m}:{v}".format(m=metric, v=value))
return '\n'.join(statements)
def calculate_cumulative_returns(self, returns):
return (1. + returns).prod() - 1
def update_current_max(self):
if len(self.algorithm_cumulative_returns) == 0:
return
current_cumulative_return = \
self.algorithm_cumulative_returns[self.latest_dt]
if self.current_max < current_cumulative_return:
self.current_max = current_cumulative_return
def calculate_max_drawdown(self):
if len(self.algorithm_cumulative_returns) == 0:
return self.max_drawdown
# The drawdown is defined as: (high - low) / high
# The above factors out to: 1.0 - (low / high)
#
# Instead of explicitly always using the low, use the current total
# return value, and test that against the max drawdown, which will
# exceed the previous max_drawdown iff the current return is lower than
# the previous low in the current drawdown window.
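        # Illustrative (hypothetical) numbers, not taken from this code base:
        # with a running peak of +20% (current_max = 0.20) and a current
        # cumulative return of +8%, the expression below gives
        # 1.0 - (1.08 / 1.20) = 0.10, i.e. a 10% drop from the peak.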
cur_drawdown = 1.0 - (
(1.0 + self.algorithm_cumulative_returns[self.latest_dt]) /
(1.0 + self.current_max))
self.drawdowns[self.latest_dt] = cur_drawdown
if self.max_drawdown < cur_drawdown:
return cur_drawdown
else:
return self.max_drawdown
def calculate_max_leverage(self):
# The leverage is defined as: the gross_exposure/net_liquidation
# gross_exposure = long_exposure + abs(short_exposure)
# net_liquidation = ending_cash + long_exposure + short_exposure
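        # A hypothetical example of the definitions above (values are
        # illustrative only): long_exposure = 150, short_exposure = -50 and
        # ending_cash = 0 give gross_exposure = 150 + 50 = 200 and
        # net_liquidation = 0 + 150 - 50 = 100, hence a leverage of 2.0.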
cur_leverage = self.algorithm_cumulative_leverages[self.latest_dt]
return max(cur_leverage, self.max_leverage)
def calculate_sharpe(self):
"""
http://en.wikipedia.org/wiki/Sharpe_ratio
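        Computed here (assuming the usual definition) as
        (annualized mean return - treasury return) / annualized volatility,
        e.g. (0.12 - 0.02) / 0.20 = 0.5 with purely hypothetical numbers.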
"""
return sharpe_ratio(self.metrics.algorithm_volatility[self.latest_dt],
self.annualized_mean_returns[self.latest_dt],
self.daily_treasury[self.latest_dt.date()])
def calculate_sortino(self):
"""
http://en.wikipedia.org/wiki/Sortino_ratio
"""
return sortino_ratio(self.annualized_mean_returns[self.latest_dt],
self.daily_treasury[self.latest_dt.date()],
self.metrics.downside_risk[self.latest_dt])
def calculate_information(self):
"""
http://en.wikipedia.org/wiki/Information_ratio
"""
return information_ratio(
self.metrics.algorithm_volatility[self.latest_dt],
self.annualized_mean_returns[self.latest_dt],
self.annualized_mean_benchmark_returns[self.latest_dt])
def calculate_alpha(self):
"""
http://en.wikipedia.org/wiki/Alpha_(investment)
"""
return alpha(self.annualized_mean_returns[self.latest_dt],
self.treasury_period_return,
self.annualized_mean_benchmark_returns[self.latest_dt],
self.metrics.beta[self.latest_dt])
def calculate_volatility(self, daily_returns):
if len(daily_returns) <= 1:
return 0.0
return np.std(daily_returns, ddof=1) * math.sqrt(252)
def calculate_downside_risk(self):
return downside_risk(self.algorithm_returns.values,
self.mean_returns.values,
252)
def calculate_beta(self):
"""
.. math::
\\beta_a = \\frac{\mathrm{Cov}(r_a,r_p)}{\mathrm{Var}(r_p)}
http://en.wikipedia.org/wiki/Beta_(finance)
"""
        # It doesn't make much sense to calculate beta for fewer than two
        # values, so return 0.0 instead.
if len(self.algorithm_returns) < 2:
return 0.0
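        # In the 2x2 covariance matrix C computed below, C[0][1] is
        # Cov(algorithm, benchmark) and C[1][1] is Var(benchmark), so the
        # final quotient matches the formula in the docstring above.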
returns_matrix = np.vstack([self.algorithm_returns,
self.benchmark_returns])
C = np.cov(returns_matrix, ddof=1)
algorithm_covariance = C[0][1]
benchmark_variance = C[1][1]
beta = algorithm_covariance / benchmark_variance
return beta
def __getstate__(self):
state_dict = \
{k: v for k, v in iteritems(self.__dict__) if
(not k.startswith('_') and not k == 'treasury_curves')}
STATE_VERSION = 2
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 2
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
            raise BaseException("RiskMetricsCumulative "
                                "saved state is too old.")
self.__dict__.update(state)
        # These are big and we don't need to serialize them
# pop them back in now
self.treasury_curves = trading.environment.treasury_curves
| apache-2.0 |
ryscet/pyseries | pyseries/Analysis/erp.py | 1 | 1071 | """Event related potentials - averages time locked to an event"""
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import signal
import numpy as np
def plot_erp(epochs_dict, electrode):
#colors = {}
#colors['wb_Diode'] = 'b'
#colors['bw_Diode'] = 'b'
#colors['bw_Port'] = 'g'
#colors['wb_Port'] = 'g'
fig, axes = plt.subplots()
sample_duration = 1000 / 497.975590198584
time_line = np.arange(-100, 100, 1) * sample_duration
#fig2, axes2 = plt.subplots()
for key, value in epochs_dict[electrode].items():
#g = sns.tsplot(data=value, time = time_line, color = colors[key], condition = key, ax = axes)
g = sns.tsplot(data=value, time = time_line, condition = key, ax = axes)
break
#g2 = sns.tsplot(data=value, time = np.arange(0, len(value[0,:]), 1), color = colors[key], condition = key, ax = axes2, err_style = 'unit_traces')
#axes.legend()
axes.axvline(0, linestyle = '--', c = 'black', label = 'switch')
    axes.set_xlabel('Milliseconds from event marker')
axes.legend() | mit |
r0k3/trading-with-python | cookbook/reconstructVXX/downloadVixFutures.py | 77 | 3012 | #-------------------------------------------------------------------------------
# Name: download CBOE futures
# Purpose: get VIX futures data from CBOE, process data to a single file
#
#
# Created: 15-10-2011
# Copyright: (c) Jev Kuznetsov 2011
# Licence: BSD
#-------------------------------------------------------------------------------
#!/usr/bin/env python
from urllib import urlretrieve
import os
from pandas import *
import datetime
import numpy as np
m_codes = ['F','G','H','J','K','M','N','Q','U','V','X','Z'] #month codes of the futures
codes = dict(zip(m_codes,range(1,len(m_codes)+1)))
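# For illustration (hypothetical call): month index 0 maps to code 'F'
# (January), so saveVixFutureData(2008, 0, ...) would request the file
# CFE_F08_VX.csv; codes maps 'F' -> 1 through 'Z' -> 12.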
#dataDir = os.path.dirname(__file__)+'/data'
dataDir = os.path.expanduser('~')+'/twpData/vixFutures'
print 'Data directory: ', dataDir
def saveVixFutureData(year,month, path, forceDownload=False):
''' Get future from CBOE and save to file '''
fName = "CFE_{0}{1}_VX.csv".format(m_codes[month],str(year)[-2:])
    if os.path.exists(path+'\\'+fName) and not forceDownload:
print 'File already downloaded, skipping'
return
urlStr = "http://cfe.cboe.com/Publish/ScheduledTask/MktData/datahouse/{0}".format(fName)
print 'Getting: %s' % urlStr
try:
urlretrieve(urlStr,path+'\\'+fName)
except Exception as e:
print e
def buildDataTable(dataDir):
""" create single data sheet """
files = os.listdir(dataDir)
data = {}
for fName in files:
print 'Processing: ', fName
try:
df = DataFrame.from_csv(dataDir+'/'+fName)
code = fName.split('.')[0].split('_')[1]
month = '%02d' % codes[code[0]]
year = '20'+code[1:]
newCode = year+'_'+month
data[newCode] = df
except Exception as e:
print 'Could not process:', e
full = DataFrame()
for k,df in data.iteritems():
s = df['Settle']
s.name = k
s[s<5] = np.nan
if len(s.dropna())>0:
full = full.join(s,how='outer')
else:
print s.name, ': Empty dataset.'
full[full<5]=np.nan
full = full[sorted(full.columns)]
# use only data after this date
startDate = datetime.datetime(2008,1,1)
idx = full.index >= startDate
full = full.ix[idx,:]
#full.plot(ax=gca())
fName = os.path.expanduser('~')+'/twpData/vix_futures.csv'
print 'Saving to ', fName
full.to_csv(fName)
if __name__ == '__main__':
if not os.path.exists(dataDir):
print 'creating data directory %s' % dataDir
os.makedirs(dataDir)
for year in range(2008,2013):
for month in range(12):
print 'Getting data for {0}/{1}'.format(year,month+1)
saveVixFutureData(year,month,dataDir)
    print 'Raw data was saved to {0}'.format(dataDir)
buildDataTable(dataDir) | bsd-3-clause |
saltstar/spark | python/setup.py | 1 | 9836 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import glob
import os
import sys
from setuptools import setup, find_packages
from shutil import copyfile, copytree, rmtree
if sys.version_info < (2, 7):
print("Python versions prior to 2.7 are not supported for pip installed PySpark.",
file=sys.stderr)
exit(-1)
try:
exec(open('pyspark/version.py').read())
except IOError:
print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
file=sys.stderr)
sys.exit(-1)
VERSION = __version__
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
cd python
python setup.py sdist
pip install dist/*.tar.gz"""
# Figure out where the jars are we need to package with PySpark.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
# Release mode puts the jars in a jars directory
JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
JARS_PATH), file=sys.stderr)
sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check and see if we are under the spark path in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark otherwise we
# want to use the symlink farm. And if the symlink farm exists under while under Spark (e.g. a
# partially built sdist) we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
(os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
# Construct links for setup
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
exit(-1)
try:
# We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
# find it where expected. The rest of the files aren't copied because they are accessed
# using Python imports instead which will be resolved correctly.
try:
os.makedirs("pyspark/python/pyspark")
except OSError:
# Don't worry if the directory already exists.
pass
copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")
if (in_spark):
# Construct the symlink farm - this is necessary since we can't refer to the path above the
# package root and we need to copy the jars and scripts which are up above the python root.
if _supports_symlinks():
os.symlink(JARS_PATH, JARS_TARGET)
os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
os.symlink(DATA_PATH, DATA_TARGET)
os.symlink(LICENSES_PATH, LICENSES_TARGET)
else:
# For windows fall back to the slower copytree
copytree(JARS_PATH, JARS_TARGET)
copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
copytree(DATA_PATH, DATA_TARGET)
copytree(LICENSES_PATH, LICENSES_TARGET)
else:
# If we are not inside of SPARK_HOME verify we have the required symlink farm
if not os.path.exists(JARS_TARGET):
print("To build packaging must be in the python directory under the SPARK_HOME.",
file=sys.stderr)
if not os.path.isdir(SCRIPTS_TARGET):
print(incorrect_invocation_message, file=sys.stderr)
exit(-1)
# Scripts directive requires a list of each script path and does not take wild cards.
script_names = os.listdir(SCRIPTS_TARGET)
scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
# We add find_spark_home.py to the bin directory we install so that pip installed PySpark
# will search for SPARK_HOME with Python.
scripts.append("pyspark/find_spark_home.py")
# Parse the README markdown file into rst for PyPI
long_description = "!!!!! missing pandoc do not upload to PyPI !!!!"
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except ImportError:
print("Could not import pypandoc - required to package PySpark", file=sys.stderr)
except OSError:
print("Could not convert - pandoc is not installed", file=sys.stderr)
setup(
name='pyspark',
version=VERSION,
description='Apache Spark Python API',
long_description=long_description,
author='Spark Developers',
author_email='[email protected]',
url='https://github.com/apache/spark/tree/master/python',
packages=['pyspark',
'pyspark.mllib',
'pyspark.mllib.linalg',
'pyspark.mllib.stat',
'pyspark.ml',
'pyspark.ml.linalg',
'pyspark.ml.param',
'pyspark.sql',
'pyspark.streaming',
'pyspark.bin',
'pyspark.jars',
'pyspark.python.pyspark',
'pyspark.python.lib',
'pyspark.data',
'pyspark.licenses',
'pyspark.examples.src.main.python'],
include_package_data=True,
package_dir={
'pyspark.jars': 'deps/jars',
'pyspark.bin': 'deps/bin',
'pyspark.python.lib': 'lib',
'pyspark.data': 'deps/data',
'pyspark.licenses': 'deps/licenses',
'pyspark.examples.src.main.python': 'deps/examples',
},
package_data={
'pyspark.jars': ['*.jar'],
'pyspark.bin': ['*'],
'pyspark.python.lib': ['*.zip'],
'pyspark.data': ['*.txt', '*.data'],
'pyspark.licenses': ['*.txt'],
'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
scripts=scripts,
license='http://www.apache.org/licenses/LICENSE-2.0',
install_requires=['py4j==0.10.6'],
setup_requires=['pypandoc'],
extras_require={
'ml': ['numpy>=1.7'],
'mllib': ['numpy>=1.7'],
'sql': ['pandas>=0.19.2', 'pyarrow>=0.8.0']
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy']
)
finally:
# We only cleanup the symlink farm if we were in Spark, otherwise we are installing rather than
# packaging.
if (in_spark):
# Depending on cleaning up the symlink farm or copied version
if _supports_symlinks():
os.remove(os.path.join(TEMP_PATH, "jars"))
os.remove(os.path.join(TEMP_PATH, "bin"))
os.remove(os.path.join(TEMP_PATH, "examples"))
os.remove(os.path.join(TEMP_PATH, "data"))
os.remove(os.path.join(TEMP_PATH, "licenses"))
else:
rmtree(os.path.join(TEMP_PATH, "jars"))
rmtree(os.path.join(TEMP_PATH, "bin"))
rmtree(os.path.join(TEMP_PATH, "examples"))
rmtree(os.path.join(TEMP_PATH, "data"))
rmtree(os.path.join(TEMP_PATH, "licenses"))
os.rmdir(TEMP_PATH)
| apache-2.0 |
oliverwangyi/networkgamesim | rgenpower.py | 1 | 2193 | #Require NetworkX, matplotlib
#create_degree_sequence has been sunset
#pesudo_power_sequence_alt gives an alternative way to generate a degree sequence
import networkx as nx
from networkx.utils import powerlaw_sequence
import matplotlib.pyplot as plt
degree_sequence_collection = []
network_collection = []
edgelist_collection = []
#Generate a single degree sequence
def pesudo_power_sequence(n, gamma):
current_degree_sequence = nx.utils.create_degree_sequence(n, powerlaw_sequence, max_tries=50, exponent=gamma)
return sorted(current_degree_sequence)
#Alternative implementations of above function
def pesudo_power_sequence_alt(n, gamma):
unround_sequence = powerlaw_sequence(n, exponent=gamma)
    current_degree_sequence = [min(n, max(int(round(s)), 0)) for s in unround_sequence]
return sorted(current_degree_sequence)
#Generate all sequence at a specific gamma
def sequence_collection_builder(n, k, gamma):
count = 0
while count < k:
current_degree_sequence = pesudo_power_sequence(n, gamma)
if current_degree_sequence not in degree_sequence_collection:
degree_sequence_collection.append(current_degree_sequence)
count = count+1
#generate 10 instances of sequences at the specific gamma
#sequence_collection_builder(100, 10, 2.6)
#Generate the graphs
#Note that configuration model may lead to self-loop and parallel if the there is too many nodes (>10,000)
#nx.Graph() removes parallel edges
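# A cautious note (not from the original author): nx.Graph() drops parallel
# edges but keeps self-loops; if self-loops matter for the simulation they
# would need to be removed explicitly, e.g. with
# temp_graph.remove_edges_from(temp_graph.selfloop_edges()) in NetworkX 1.x.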
def network_builder():
count = 0
    while count < len(degree_sequence_collection):
        temp_graph = nx.configuration_model(degree_sequence_collection[count])
        network_collection.append(nx.Graph(temp_graph))
        count = count + 1
#Build edgelist from the configuration model for simulation
def build_edgelist():
for item in network_collection:
temp_edgelist = nx.to_edgelist(item)
edgelist_collection.append(temp_edgelist)
#In case you want to see the visualization of the graphs
#Use "plt.savefig()" if you want to save the picture
def graph_visual(network_collection):
for i in range(0, len(network_collection)):
nx.draw(network_collection[i])
        plt.show()
| mit |
wronk/mne-python | mne/viz/tests/test_utils.py | 5 | 2681 | # Authors: Alexandre Gramfort <[email protected]>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from nose.tools import assert_true, assert_raises
from numpy.testing import assert_allclose
from mne.viz.utils import compare_fiff, _fake_click
from mne.viz import ClickableImage, add_background_image, mne_analyze_colormap
from mne.utils import run_tests_if_main
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
def test_mne_analyze_colormap():
"""Test mne_analyze_colormap
"""
assert_raises(ValueError, mne_analyze_colormap, [0])
assert_raises(ValueError, mne_analyze_colormap, [-1, 1, 2])
assert_raises(ValueError, mne_analyze_colormap, [0, 2, 1])
def test_compare_fiff():
import matplotlib.pyplot as plt
compare_fiff(raw_fname, cov_fname, read_limit=0, show=False)
plt.close('all')
def test_clickable_image():
"""Test the ClickableImage class."""
# Gen data and create clickable image
import matplotlib.pyplot as plt
im = np.random.RandomState(0).randn(100, 100)
clk = ClickableImage(im)
clicks = [(12, 8), (46, 48), (10, 24)]
# Generate clicks
for click in clicks:
_fake_click(clk.fig, clk.ax, click, xform='data')
assert_allclose(np.array(clicks), np.array(clk.coords))
assert_true(len(clicks) == len(clk.coords))
# Exporting to layout
lt = clk.to_layout()
assert_true(lt.pos.shape[0] == len(clicks))
assert_allclose(lt.pos[1, 0] / lt.pos[2, 0],
clicks[1][0] / float(clicks[2][0]))
clk.plot_clicks()
plt.close('all')
def test_add_background_image():
"""Test adding background image to a figure."""
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
f, axs = plt.subplots(1, 2)
x, y = rng.randn(2, 10)
im = rng.randn(10, 10)
axs[0].scatter(x, y)
axs[1].scatter(y, x)
for ax in axs:
ax.set_aspect(1)
# Background without changing aspect
ax_im = add_background_image(f, im)
assert_true(ax_im.get_aspect() == 'auto')
for ax in axs:
assert_true(ax.get_aspect() == 1)
# Background with changing aspect
ax_im_asp = add_background_image(f, im, set_ratios='auto')
assert_true(ax_im_asp.get_aspect() == 'auto')
for ax in axs:
assert_true(ax.get_aspect() == 'auto')
run_tests_if_main()
| bsd-3-clause |
q1ang/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squares objective. The penalty `shrinks` the
values of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression.
"""
print(__doc__)
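# A brief sketch of the penalised objective mentioned above (the standard
# ridge formulation, not code from this example): ridge minimizes
# ||y - Xw||^2 + alpha * ||w||^2, so a larger alpha shrinks the coefficients w
# and stabilises the fitted slope across noisy resamples of the data.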
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
toastedcornflakes/scikit-learn | examples/text/document_classification_20newsgroups.py | 37 | 10499 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
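# A hedged illustration (values not from this example): setting C=0.1 in the
# feature_selection step below would typically zero out more coefficients than
# the default C=1.0, leaving fewer features for the downstream classifier.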
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='navy')
plt.barh(indices + .3, training_time, .2, label="training time",
color='c')
plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
Obus/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 75 | 34122 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), stoplist)
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing make IDF fragile in the presence of feature with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
# build a vectorizer v1 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
# compare that the two vectorizer give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
# words that are high frequent across the complete corpus are likely
# to be not informative (either real stop words of extraction
# artifacts)
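        # For instance, "copyright" occurs in most of the training documents
        # here, so with max_df=0.5 its document frequency exceeds the cutoff
        # and it is pruned as a corpus-specific stop word.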
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
olologin/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 83 | 5888 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
The thickness of the silhouette plot also gives an indication of the cluster
size. The silhouette plot for cluster 0 when ``n_clusters`` is equal to 2 is
bigger in size owing to the grouping of the 3 sub-clusters into one big
cluster. However, when ``n_clusters`` is equal to 4, all the plots are more or
less of similar thickness and hence of similar sizes, as can also be verified
from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
Tihacker/Wikipedia-Templates-Analysis | categorizza/graficocategorie.py | 1 | 1226 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Create template graphs png of multiple graphs.
Usage:
stampacategorie.py [options]
Options:
-f FILE Set the input. [default: categorie.csv]
-h --help Show this screen.
"""
import csv, re, pdb, ast, time, os, math
from docopt import docopt
import datetime
import matplotlib.pyplot as plot
import numpy as np
import matplotlib.dates as mdates
import tarfile
if __name__ == "__main__":
arguments = docopt(__doc__)
i = csv.reader(open(arguments['-f'], "r"))
count = {}
for line in i:
try:
count[line[1]] = count[line[1]] + 1
except KeyError:
count[line[1]] = 1
val = []
keys = []
for k in sorted(count.items()):
if k[0] != "non classificabile":
keys.append(k[0])
val.append(k[1])
X = np.arange(len(keys))
plot.bar(X, val, color = [ "red", "blue", "yellow", "green", "purple", "cyan"], align = "center", width = 0.5)
plot.xticks(X, keys)
ax = plot.subplot(111)
plot.ylabel('Number of templates')
ax.grid(which='major', alpha=0.5)
plot.savefig("categorie.png",bbox_inches='tight') | mit |
liberatorqjw/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 16 | 10538 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
    # test X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
Edu-Glez/Bank_sentiment_analysis | test_naive_bayes.py | 2 | 2200 | #Program to train the classifier
#Importing necessary libraries to classify
import pickle
import pandas as pd
import numpy as np
import nltk
#Load the txt files as tables
a=pd.read_table('tweets_pos_clean.txt')
b=pd.read_table('tweets_neg_clean.txt')
#Initialize the auxiliary arrays
aux1=[]
aux2=[]
auxiliar1=[]
auxiliar2=[]
#Keep only the words longer than 3 letters, plus the happy/sad face emoticons
for element in a['Text']:
for w in element.split():
if (w==':)' or len(w)>3):
auxiliar1.append(w)
aux1.append((auxiliar1,'positive'))
auxiliar1=[]
for element in b['text']:
for w in element.split():
if (w==':(' or len(w)>3):
auxiliar2.append(w)
aux2.append((auxiliar2,'negative'))
auxiliar2=[]
#Keep only the first 10000 positive and 20000 negative examples
aux1=aux1[:10000]
aux2=aux2[:20000]
#Convert to dataframe
pos_df=pd.DataFrame(aux1)
neg_df=pd.DataFrame(aux2)
#Naming the columns
pos_df.columns=['words','sentiment']
neg_df.columns=['words','sentiment']
#Concatenate positive and negative values
table_aux1=aux1+aux2
#Definition of functions
#Function for collecting all the words from the labelled tweets
def get_words_in_tweets(tweets):
all_words = []
for (words, sentiment) in tweets:
all_words.extend(words)
return all_words
#Function for building the vocabulary (the distinct words) of the tweets
def get_word_features(wordlist):
wordlist = nltk.FreqDist(wordlist)
word_features = wordlist.keys()
return word_features
#Function to check if a tweet has a specific word
def extract_features(document):
document_words = set(document)
features = {}
for word in word_features:
features['contains(%s)' % word] = (word in document_words)
return features
#Get the frequency of all the words in the dataset
word_features = get_word_features(get_words_in_tweets(table_aux1))
#Building the training set for the classifier
training_set = nltk.classify.apply_features(extract_features, table_aux1)
#Train the classifier
classifier = nltk.NaiveBayesClassifier.train(training_set)
#Save the classifier and the word_features in a pickle for external use
with open('objs.pickle','wb') as f:
pickle.dump([classifier, word_features],f)
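#Example added for illustration (not part of the original script): once the
#classifier is trained, a new tweet can be labelled with the same helper
#functions defined above. The sample tweet below is made up.
example_tweet = [w for w in "i love this great service :)".split() if (w == ':)' or len(w) > 3)]
print(classifier.classify(extract_features(example_tweet)))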
| apache-2.0 |
timqian/sms-tools | software/transformations_interface/harmonicTransformations_function.py | 5 | 5387 | # function call to the transformation functions of relevance for the hpsModel
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../transformations/'))
import sineModel as SM
import harmonicModel as HM
import sineTransformations as ST
import harmonicTransformations as HT
import utilFunctions as UF
def analysis(inputFile='../../sounds/vignesh.wav', window='blackman', M=1201, N=2048, t=-90,
minSineDur=0.1, nH=100, minf0=130, maxf0=300, f0et=7, harmDevSlope=0.01):
"""
Analyze a sound with the harmonic model
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size
N: fft size (power of two, bigger or equal than M)
t: magnitude threshold of spectral peaks
minSineDur: minimum duration of sinusoidal tracks
nH: maximum number of harmonics
minf0: minimum fundamental frequency in sound
maxf0: maximum fundamental frequency in sound
f0et: maximum error accepted in f0 detection algorithm
harmDevSlope: allowed deviation of harmonic tracks, higher harmonics have higher allowed deviation
    returns inputFile: input file name; fs: sampling rate of input file; hfreq,
    hmag: harmonic frequencies and magnitudes
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sound
fs, x = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
# compute the harmonic model of the whole sound
hfreq, hmag, hphase = HM.harmonicModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur)
# synthesize the sines without original phases
y = SM.sineModelSynth(hfreq, hmag, np.array([]), Ns, H, fs)
# output sound file (monophonic with sampling rate of 44100)
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_harmonicModel.wav'
# write the sound resulting from the inverse stft
UF.wavwrite(y, fs, outputFile)
# create figure to show plots
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 5000.0
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
if (hfreq.shape[1] > 0):
plt.subplot(3,1,2)
tracks = np.copy(hfreq)
numFrames = tracks.shape[0]
frmTime = H*np.arange(numFrames)/float(fs)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks)
plt.axis([0, x.size/float(fs), 0, maxplotfreq])
plt.title('frequencies of harmonic tracks')
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show(block=False)
return inputFile, fs, hfreq, hmag
def transformation_synthesis(inputFile, fs, hfreq, hmag, freqScaling = np.array([0, 2.0, 1, .3]),
freqStretching = np.array([0, 1, 1, 1.5]), timbrePreservation = 1,
timeScaling = np.array([0, .0, .671, .671, 1.978, 1.978+1.0])):
"""
Transform the analysis values returned by the analysis function and synthesize the sound
inputFile: name of input file
fs: sampling rate of input file
    hfreq, hmag: harmonic frequencies and magnitudes
freqScaling: frequency scaling factors, in time-value pairs
    freqStretching: frequency stretching factors, in time-value pairs
timbrePreservation: 1 preserves original timbre, 0 it does not
timeScaling: time scaling factors, in time-value pairs
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# frequency scaling of the harmonics
yhfreq, yhmag = HT.harmonicFreqScaling(hfreq, hmag, freqScaling, freqStretching, timbrePreservation, fs)
# time scale the sound
yhfreq, yhmag = ST.sineTimeScaling(yhfreq, yhmag, timeScaling)
# synthesis
y = SM.sineModelSynth(yhfreq, yhmag, np.array([]), Ns, H, fs)
# write output sound
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_harmonicModelTransformation.wav'
UF.wavwrite(y, fs, outputFile)
# create figure to plot
plt.figure(figsize=(12, 6))
# frequency range to plot
maxplotfreq = 15000.0
# plot the transformed sinusoidal frequencies
plt.subplot(2,1,1)
if (yhfreq.shape[1] > 0):
tracks = np.copy(yhfreq)
tracks = tracks*np.less(tracks, maxplotfreq)
tracks[tracks<=0] = np.nan
numFrames = int(tracks[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, tracks)
plt.title('transformed harmonic tracks')
plt.autoscale(tight=True)
# plot the output sound
plt.subplot(2,1,2)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
# analysis
inputFile, fs, hfreq, hmag = analysis()
# transformation and synthesis
transformation_synthesis (inputFile, fs, hfreq, hmag)
plt.show()
| agpl-3.0 |
raymondxyang/tensorflow | tensorflow/examples/get_started/regression/linear_regression_categorical.py | 8 | 3959 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear regression with categorical features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import imports85 # pylint: disable=g-bad-import-order
STEPS = 1000
def main(argv):
"""Builds, trains, and evaluates the model."""
assert len(argv) == 1
(x_train, y_train), (x_test, y_test) = imports85.load_data()
# Build the training input_fn.
input_train = tf.estimator.inputs.pandas_input_fn(
x=x_train,
y=y_train,
      # Setting `num_epochs` to `None` lets the `input_fn` generate data
# indefinitely, leaving the call to `Estimator.train` in control.
num_epochs=None,
shuffle=True)
# Build the validation input_fn.
input_test = tf.estimator.inputs.pandas_input_fn(
x=x_test, y=y_test, shuffle=True)
# The following code demonstrates two of the ways that `feature_columns` can
# be used to build a model with categorical inputs.
# The first way assigns a unique weight to each category. To do this, you must
# specify the category's vocabulary (values outside this specification will
# receive a weight of zero).
# Alternatively, you can define the vocabulary in a file (by calling
# `categorical_column_with_vocabulary_file`) or as a range of positive
# integers (by calling `categorical_column_with_identity`)
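  # Illustrative sketch (added; not used by this model). The vocabulary file
  # path and the integer feature name below are hypothetical placeholders:
  #
  #   body_style_from_file = tf.feature_column.categorical_column_with_vocabulary_file(
  #       key="body-style", vocabulary_file="body_styles.txt")
  #   int_category_column = tf.feature_column.categorical_column_with_identity(
  #       key="some-integer-feature", num_buckets=10)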
body_style_vocab = ["hardtop", "wagon", "sedan", "hatchback", "convertible"]
body_style_column = tf.feature_column.categorical_column_with_vocabulary_list(
key="body-style", vocabulary_list=body_style_vocab)
# The second way, appropriate for an unspecified vocabulary, is to create a
  # hashed column. It will create a fixed-length list of weights, and
  # automatically assign each input category to a weight. Due to the
# pseudo-randomness of the process, some weights may be shared between
# categories, while others will remain unused.
make_column = tf.feature_column.categorical_column_with_hash_bucket(
key="make", hash_bucket_size=50)
feature_columns = [
# This model uses the same two numeric features as `linear_regressor.py`
tf.feature_column.numeric_column(key="curb-weight"),
tf.feature_column.numeric_column(key="highway-mpg"),
      # This model adds two categorical columns that will adjust the price based
# on "make" and "body-style".
body_style_column,
make_column,
]
# Build the Estimator.
model = tf.estimator.LinearRegressor(feature_columns=feature_columns)
# Train the model.
# By default, the Estimators log output every 100 steps.
model.train(input_fn=input_train, steps=STEPS)
# Evaluate how the model performs on data it has not yet seen.
eval_result = model.evaluate(input_fn=input_test)
# The evaluation returns a Python dictionary. The "average_loss" key holds the
# Mean Squared Error (MSE).
average_loss = eval_result["average_loss"]
# Convert MSE to Root Mean Square Error (RMSE).
print("\n" + 80 * "*")
print("\nRMS error for the test set: ${:.0f}".format(average_loss**0.5))
print()
if __name__ == "__main__":
# The Estimator periodically generates "INFO" logs; make these logs visible.
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run(main=main)
| apache-2.0 |
rtavenar/tslearn | tslearn/preprocessing.py | 1 | 8561 | """
The :mod:`tslearn.preprocessing` module gathers time series scalers.
"""
import numpy
from sklearn.base import TransformerMixin
from scipy.interpolate import interp1d
import warnings
from tslearn.utils import to_time_series_dataset, check_equal_size, ts_size
__author__ = 'Romain Tavenard romain.tavenard[at]univ-rennes2.fr'
class TimeSeriesResampler(TransformerMixin):
"""Resampler for time series. Resample time series so that they reach the
target size.
Parameters
----------
sz : int
Size of the output time series.
Examples
--------
>>> TimeSeriesResampler(sz=5).fit_transform([[0, 3, 6]])
array([[[0. ],
[1.5],
[3. ],
[4.5],
[6. ]]])
"""
def __init__(self, sz):
self.sz_ = sz
def fit(self, X, y=None, **kwargs):
"""A dummy method such that it complies to the sklearn requirements.
Since this method is completely stateless, it just returns itself.
Parameters
----------
X
Ignored
Returns
-------
self
"""
return self
def fit_transform(self, X, y=None, **kwargs):
"""Fit to data, then transform it.
Parameters
----------
X : array-like of shape (n_ts, sz, d)
Time series dataset to be resampled.
Returns
-------
numpy.ndarray
Resampled time series dataset.
"""
return self.fit(X).transform(X)
def transform(self, X, y=None, **kwargs):
"""Fit to data, then transform it.
Parameters
----------
X : array-like of shape (n_ts, sz, d)
Time series dataset to be resampled.
Returns
-------
numpy.ndarray
Resampled time series dataset.
"""
X_ = to_time_series_dataset(X)
n_ts, sz, d = X_.shape
equal_size = check_equal_size(X_)
X_out = numpy.empty((n_ts, self.sz_, d))
for i in range(X_.shape[0]):
xnew = numpy.linspace(0, 1, self.sz_)
if not equal_size:
sz = ts_size(X_[i])
for di in range(d):
f = interp1d(numpy.linspace(0, 1, sz), X_[i, :sz, di],
kind="slinear")
X_out[i, :, di] = f(xnew)
return X_out
class TimeSeriesScalerMinMax(TransformerMixin):
"""Scaler for time series. Scales time series so that their span in each
dimension is between ``min`` and ``max``.
Parameters
----------
value_range : tuple (default: (0., 1.))
The minimum and maximum value for the output time series.
min : float (default: 0.)
Minimum value for output time series.
.. deprecated:: 0.2
min is deprecated in version 0.2 and will be
removed in 0.4. Use value_range instead.
max : float (default: 1.)
Maximum value for output time series.
.. deprecated:: 0.2
min is deprecated in version 0.2 and will be
removed in 0.4. Use value_range instead.
Notes
-----
This method requires a dataset of equal-sized time series.
NaNs within a time series are ignored when calculating min and max.
Examples
--------
>>> TimeSeriesScalerMinMax(value_range=(1., 2.)).fit_transform([[0, 3, 6]])
array([[[1. ],
[1.5],
[2. ]]])
>>> TimeSeriesScalerMinMax(value_range=(1., 2.)).fit_transform(
... [[numpy.nan, 3, 6]]
... )
array([[[nan],
[ 1.],
[ 2.]]])
"""
def __init__(self, value_range=(0., 1.), min=None, max=None):
self.value_range = value_range
self.min_ = min
self.max_ = max
def fit(self, X, y=None, **kwargs):
"""A dummy method such that it complies to the sklearn requirements.
Since this method is completely stateless, it just returns itself.
Parameters
----------
X
Ignored
Returns
-------
self
"""
return self
def fit_transform(self, X, y=None, **kwargs):
"""Fit to data, then transform it.
Parameters
----------
X : array-like of shape (n_ts, sz, d)
Time series dataset to be rescaled.
Returns
-------
numpy.ndarray
            Rescaled time series dataset.
"""
return self.fit(X).transform(X)
def transform(self, X, y=None, **kwargs):
"""Will normalize (min-max) each of the timeseries. IMPORTANT: this
transformation is completely stateless, and is applied to each of
the timeseries individually.
Parameters
----------
X : array-like of shape (n_ts, sz, d)
Time series dataset to be rescaled.
Returns
-------
numpy.ndarray
Rescaled time series dataset.
"""
if self.min_ is not None:
warnings.warn(
"'min' is deprecated in version 0.2 and will be "
"removed in 0.4. Use value_range instead.",
DeprecationWarning, stacklevel=2)
self.value_range = (self.min_, self.value_range[1])
if self.max_ is not None:
warnings.warn(
"'max' is deprecated in version 0.2 and will be "
"removed in 0.4. Use value_range instead.",
DeprecationWarning, stacklevel=2)
self.value_range = (self.value_range[0], self.max_)
if self.value_range[0] >= self.value_range[1]:
raise ValueError("Minimum of desired range must be smaller"
" than maximum. Got %s." % str(self.value_range))
X_ = to_time_series_dataset(X)
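        # The rescaling applied below is, per time series and per dimension
        # (comment added for clarity):
        #     X' = (X - min) * (hi - lo) / (max - min) + lo
        # with (lo, hi) = self.value_range.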
min_t = numpy.nanmin(X_, axis=1)[:, numpy.newaxis, :]
max_t = numpy.nanmax(X_, axis=1)[:, numpy.newaxis, :]
range_t = max_t - min_t
nomin = (X_ - min_t) * (self.value_range[1] - self.value_range[0])
X_ = nomin / range_t + self.value_range[0]
return X_
class TimeSeriesScalerMeanVariance(TransformerMixin):
"""Scaler for time series. Scales time series so that their mean (resp.
standard deviation) in each dimension is
mu (resp. std).
Parameters
----------
mu : float (default: 0.)
Mean of the output time series.
std : float (default: 1.)
Standard deviation of the output time series.
Notes
-----
This method requires a dataset of equal-sized time series.
NaNs within a time series are ignored when calculating mu and std.
Examples
--------
>>> TimeSeriesScalerMeanVariance(mu=0.,
... std=1.).fit_transform([[0, 3, 6]])
array([[[-1.22474487],
[ 0. ],
[ 1.22474487]]])
>>> TimeSeriesScalerMeanVariance(mu=0.,
... std=1.).fit_transform([[numpy.nan, 3, 6]])
array([[[nan],
[-1.],
[ 1.]]])
"""
def __init__(self, mu=0., std=1.):
self.mu_ = mu
self.std_ = std
self.global_mean = None
self.global_std = None
def fit(self, X, y=None, **kwargs):
"""A dummy method such that it complies to the sklearn requirements.
Since this method is completely stateless, it just returns itself.
Parameters
----------
X
Ignored
Returns
-------
self
"""
return self
def fit_transform(self, X, y=None, **kwargs):
"""Fit to data, then transform it.
Parameters
----------
X : array-like of shape (n_ts, sz, d)
Time series dataset to be rescaled.
Returns
-------
numpy.ndarray
            Rescaled time series dataset.
"""
return self.fit(X).transform(X)
def transform(self, X, y=None, **kwargs):
"""Fit to data, then transform it.
Parameters
----------
X : array-like of shape (n_ts, sz, d)
Time series dataset to be rescaled
Returns
-------
numpy.ndarray
Rescaled time series dataset
"""
X_ = to_time_series_dataset(X)
mean_t = numpy.nanmean(X_, axis=1)[:, numpy.newaxis, :]
std_t = numpy.nanstd(X_, axis=1)[:, numpy.newaxis, :]
std_t[std_t == 0.] = 1.
X_ = (X_ - mean_t) * self.std_ / std_t + self.mu_
return X_
| bsd-2-clause |
thomasaarholt/hyperspy | hyperspy/_signals/lazy.py | 2 | 41127 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import logging
from functools import partial
import warnings
import numpy as np
import dask.array as da
import dask.delayed as dd
from dask import threaded
from dask.diagnostics import ProgressBar
from itertools import product
from hyperspy.signal import BaseSignal
from hyperspy.defaults_parser import preferences
from hyperspy.docstrings.signal import SHOW_PROGRESSBAR_ARG
from hyperspy.exceptions import VisibleDeprecationWarning
from hyperspy.external.progressbar import progressbar
from hyperspy.misc.array_tools import _requires_linear_rebin
from hyperspy.misc.hist_tools import histogram_dask
from hyperspy.misc.machine_learning import import_sklearn
from hyperspy.misc.utils import multiply, dummy_context_manager
_logger = logging.getLogger(__name__)
lazyerror = NotImplementedError('This method is not available in lazy signals')
def to_array(thing, chunks=None):
"""Accepts BaseSignal, dask or numpy arrays and always produces either
numpy or dask array.
Parameters
----------
thing : {BaseSignal, dask.array.Array, numpy.ndarray}
the thing to be converted
chunks : {None, tuple of tuples}
If None, the returned value is a numpy array. Otherwise returns dask
array with the chunks as specified.
Returns
-------
res : {numpy.ndarray, dask.array.Array}
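    Examples
    --------
    (Added for illustration; not in the original docstring.)
    >>> import numpy as np
    >>> to_array(np.arange(4))
    array([0, 1, 2, 3])
    >>> to_array(np.arange(4), chunks=((2, 2),)).chunks
    ((2, 2),)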
"""
if thing is None:
return None
if isinstance(thing, BaseSignal):
thing = thing.data
if chunks is None:
if isinstance(thing, da.Array):
thing = thing.compute()
if isinstance(thing, np.ndarray):
return thing
else:
raise ValueError
else:
if isinstance(thing, np.ndarray):
thing = da.from_array(thing, chunks=chunks)
if isinstance(thing, da.Array):
if thing.chunks != chunks:
thing = thing.rechunk(chunks)
return thing
else:
raise ValueError
class LazySignal(BaseSignal):
"""A Lazy Signal instance that delays computation until explicitly saved
(assuming storing the full result of computation in memory is not feasible)
"""
_lazy = True
def compute(self, close_file=False, show_progressbar=None, **kwargs):
"""Attempt to store the full signal in memory.
Parameters
----------
close_file : bool, default False
            If True, attempt to close the file associated with the dask
array data if any. Note that closing the file will make all other
associated lazy signals inoperative.
%s
Returns
-------
None
"""
if "progressbar" in kwargs:
warnings.warn(
"The `progressbar` keyword is deprecated and will be removed "
"in HyperSpy 2.0. Use `show_progressbar` instead.",
VisibleDeprecationWarning,
)
show_progressbar = kwargs["progressbar"]
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
cm = ProgressBar if show_progressbar else dummy_context_manager
with cm():
da = self.data
data = da.compute()
if close_file:
self.close_file()
self.data = data
self._lazy = False
self._assign_subclass()
compute.__doc__ %= SHOW_PROGRESSBAR_ARG
def close_file(self):
"""Closes the associated data file if any.
Currently it only supports closing the file associated with a dask
array created from an h5py DataSet (default HyperSpy hdf5 reader).
"""
arrkey = None
for key in self.data.dask.keys():
if "array-original" in key:
arrkey = key
break
if arrkey:
try:
self.data.dask[arrkey].file.close()
except AttributeError:
_logger.exception("Failed to close lazy Signal file")
def _get_dask_chunks(self, axis=None, dtype=None):
"""Returns dask chunks.
Aims:
- Have at least one signal (or specified axis) in a single chunk,
or as many as fit in memory
Parameters
----------
axis : {int, string, None, axis, tuple}
If axis is None (default), returns chunks for current data shape so
that at least one signal is in the chunk. If an axis is specified,
only that particular axis is guaranteed to be "not sliced".
dtype : {string, np.dtype}
The dtype of target chunks.
Returns
-------
Tuple of tuples, dask chunks
"""
dc = self.data
dcshape = dc.shape
for _axis in self.axes_manager._axes:
if _axis.index_in_array < len(dcshape):
_axis.size = int(dcshape[_axis.index_in_array])
if axis is not None:
need_axes = self.axes_manager[axis]
if not np.iterable(need_axes):
need_axes = [need_axes, ]
else:
need_axes = self.axes_manager.signal_axes
if dtype is None:
dtype = dc.dtype
elif not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
typesize = max(dtype.itemsize, dc.dtype.itemsize)
want_to_keep = multiply([ax.size for ax in need_axes]) * typesize
# @mrocklin reccomends to have around 100MB chunks, so we do that:
num_that_fit = int(100. * 2.**20 / want_to_keep)
# want to have at least one "signal" per chunk
if num_that_fit < 2:
chunks = [tuple(1 for _ in range(i)) for i in dc.shape]
for ax in need_axes:
chunks[ax.index_in_array] = dc.shape[ax.index_in_array],
return tuple(chunks)
sizes = [
ax.size for ax in self.axes_manager._axes if ax not in need_axes
]
indices = [
ax.index_in_array for ax in self.axes_manager._axes
if ax not in need_axes
]
while True:
if multiply(sizes) <= num_that_fit:
break
i = np.argmax(sizes)
sizes[i] = np.floor(sizes[i] / 2)
chunks = []
ndim = len(dc.shape)
for i in range(ndim):
if i in indices:
size = float(dc.shape[i])
split_array = np.array_split(
np.arange(size), np.ceil(size / sizes[indices.index(i)]))
chunks.append(tuple(len(sp) for sp in split_array))
else:
chunks.append((dc.shape[i], ))
return tuple(chunks)
def _make_lazy(self, axis=None, rechunk=False, dtype=None):
self.data = self._lazy_data(axis=axis, rechunk=rechunk, dtype=dtype)
def change_dtype(self, dtype, rechunk=True):
from hyperspy.misc import rgb_tools
if not isinstance(dtype, np.dtype) and (dtype not in
rgb_tools.rgb_dtypes):
dtype = np.dtype(dtype)
self._make_lazy(rechunk=rechunk, dtype=dtype)
super().change_dtype(dtype)
change_dtype.__doc__ = BaseSignal.change_dtype.__doc__
def _lazy_data(self, axis=None, rechunk=True, dtype=None):
"""Return the data as a dask array, rechunked if necessary.
Parameters
----------
axis: None, DataAxis or tuple of data axes
The data axis that must not be broken into chunks when `rechunk`
is `True`. If None, it defaults to the current signal axes.
rechunk: bool, "dask_auto"
If `True`, it rechunks the data if necessary making sure that the
            axes in ``axis`` are not split into chunks. If `False`, it does
            not rechunk unless the data is not yet a dask array, in which case
            it is chunked as if rechunk were `True`. If "dask_auto", rechunk if
necessary using dask's automatic chunk guessing.
"""
if rechunk == "dask_auto":
new_chunks = "auto"
else:
new_chunks = self._get_dask_chunks(axis=axis, dtype=dtype)
if isinstance(self.data, da.Array):
res = self.data
if self.data.chunks != new_chunks and rechunk:
_logger.info(
"Rechunking.\nOriginal chunks: %s" % str(self.data.chunks))
res = self.data.rechunk(new_chunks)
_logger.info(
"Final chunks: %s " % str(res.chunks))
else:
if isinstance(self.data, np.ma.masked_array):
data = np.where(self.data.mask, np.nan, self.data)
else:
data = self.data
res = da.from_array(data, chunks=new_chunks)
assert isinstance(res, da.Array)
return res
def _apply_function_on_data_and_remove_axis(self, function, axes,
out=None, rechunk=True):
def get_dask_function(numpy_name):
# Translate from the default numpy to dask functions
translations = {'amax': 'max', 'amin': 'min'}
if numpy_name in translations:
numpy_name = translations[numpy_name]
return getattr(da, numpy_name)
function = get_dask_function(function.__name__)
axes = self.axes_manager[axes]
if not np.iterable(axes):
axes = (axes, )
ar_axes = tuple(ax.index_in_array for ax in axes)
if len(ar_axes) == 1:
ar_axes = ar_axes[0]
# For reduce operations the actual signal and navigation
# axes configuration does not matter. Hence we leave
# dask guess the chunks
if rechunk is True:
rechunk = "dask_auto"
current_data = self._lazy_data(rechunk=rechunk)
# Apply reducing function
new_data = function(current_data, axis=ar_axes)
if not new_data.ndim:
new_data = new_data.reshape((1, ))
if out:
if out.data.shape == new_data.shape:
out.data = new_data
out.events.data_changed.trigger(obj=out)
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (new_data.shape, out.data.shape))
else:
s = self._deepcopy_with_new_data(new_data)
s._remove_axis([ax.index_in_axes_manager for ax in axes])
return s
def rebin(self, new_shape=None, scale=None,
crop=False, out=None, rechunk=True):
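        # Note (added): lazy rebin only supports integer scale factors that
        # exactly divide the corresponding axis sizes; e.g. for a (10|6) signal
        # scale=(2, 3) is supported (giving (5|2)), while scale=(3, 4) is not
        # because 3 does not divide 10.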
factors = self._validate_rebin_args_and_get_factors(
new_shape=new_shape,
scale=scale)
if _requires_linear_rebin(arr=self.data, scale=factors):
if new_shape:
raise NotImplementedError(
"Lazy rebin requires that the new shape is a divisor "
"of the original signal shape e.g. if original shape "
"(10| 6), new_shape=(5| 3) is valid, (3 | 4) is not.")
else:
raise NotImplementedError(
"Lazy rebin requires scale to be integer and divisor of the "
"original signal shape")
axis = {ax.index_in_array: ax
for ax in self.axes_manager._axes}[factors.argmax()]
self._make_lazy(axis=axis, rechunk=rechunk)
return super().rebin(new_shape=new_shape,
scale=scale, crop=crop, out=out)
rebin.__doc__ = BaseSignal.rebin.__doc__
def __array__(self, dtype=None):
return self.data.__array__(dtype=dtype)
def _make_sure_data_is_contiguous(self):
self._make_lazy(rechunk=True)
def diff(self, axis, order=1, out=None, rechunk=True):
arr_axis = self.axes_manager[axis].index_in_array
def dask_diff(arr, n, axis):
# assume arr is da.Array already
n = int(n)
if n == 0:
return arr
if n < 0:
raise ValueError("order must be positive")
nd = len(arr.shape)
slice1 = [slice(None)] * nd
slice2 = [slice(None)] * nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return dask_diff(arr[slice1] - arr[slice2], n - 1, axis=axis)
else:
return arr[slice1] - arr[slice2]
current_data = self._lazy_data(axis=axis, rechunk=rechunk)
new_data = dask_diff(current_data, order, arr_axis)
if not new_data.ndim:
new_data = new_data.reshape((1, ))
s = out or self._deepcopy_with_new_data(new_data)
if out:
if out.data.shape == new_data.shape:
out.data = new_data
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (new_data.shape, out.data.shape))
axis2 = s.axes_manager[axis]
new_offset = self.axes_manager[axis].offset + (order * axis2.scale / 2)
axis2.offset = new_offset
s.get_dimensions_from_data()
if out is None:
return s
else:
out.events.data_changed.trigger(obj=out)
diff.__doc__ = BaseSignal.diff.__doc__
def integrate_simpson(self, axis, out=None):
axis = self.axes_manager[axis]
from scipy import integrate
axis = self.axes_manager[axis]
data = self._lazy_data(axis=axis, rechunk=True)
new_data = data.map_blocks(
integrate.simps,
x=axis.axis,
axis=axis.index_in_array,
drop_axis=axis.index_in_array,
dtype=data.dtype)
s = out or self._deepcopy_with_new_data(new_data)
if out:
if out.data.shape == new_data.shape:
out.data = new_data
out.events.data_changed.trigger(obj=out)
else:
raise ValueError(
"The output shape %s does not match the shape of "
"`out` %s" % (new_data.shape, out.data.shape))
else:
s._remove_axis(axis.index_in_axes_manager)
return s
integrate_simpson.__doc__ = BaseSignal.integrate_simpson.__doc__
def valuemax(self, axis, out=None, rechunk=True):
idx = self.indexmax(axis, rechunk=rechunk)
old_data = idx.data
data = old_data.map_blocks(
lambda x: self.axes_manager[axis].index2value(x))
if out is None:
idx.data = data
return idx
else:
out.data = data
out.events.data_changed.trigger(obj=out)
valuemax.__doc__ = BaseSignal.valuemax.__doc__
def valuemin(self, axis, out=None, rechunk=True):
idx = self.indexmin(axis, rechunk=rechunk)
old_data = idx.data
data = old_data.map_blocks(
lambda x: self.axes_manager[axis].index2value(x))
if out is None:
idx.data = data
return idx
else:
out.data = data
out.events.data_changed.trigger(obj=out)
valuemin.__doc__ = BaseSignal.valuemin.__doc__
def get_histogram(self, bins='fd', out=None, rechunk=True, **kwargs):
if 'range_bins' in kwargs:
_logger.warning("'range_bins' argument not supported for lazy "
"signals")
del kwargs['range_bins']
from hyperspy.signals import Signal1D
data = self._lazy_data(rechunk=rechunk).flatten()
hist, bin_edges = histogram_dask(data, bins=bins, **kwargs)
if out is None:
hist_spec = Signal1D(hist)
hist_spec._lazy = True
hist_spec._assign_subclass()
else:
hist_spec = out
# we always overwrite the data because the computation is lazy ->
# the result signal is lazy. Assume that the `out` is already lazy
hist_spec.data = hist
hist_spec.axes_manager[0].scale = bin_edges[1] - bin_edges[0]
hist_spec.axes_manager[0].offset = bin_edges[0]
hist_spec.axes_manager[0].size = hist.shape[-1]
hist_spec.axes_manager[0].name = 'value'
hist_spec.metadata.General.title = (
self.metadata.General.title + " histogram")
hist_spec.metadata.Signal.binned = True
if out is None:
return hist_spec
else:
out.events.data_changed.trigger(obj=out)
get_histogram.__doc__ = BaseSignal.get_histogram.__doc__
@staticmethod
def _estimate_poissonian_noise_variance(dc, gain_factor, gain_offset,
correlation_factor):
variance = (dc * gain_factor + gain_offset) * correlation_factor
# The lower bound of the variance is the gaussian noise.
variance = da.clip(variance, gain_offset * correlation_factor, np.inf)
return variance
# def _get_navigation_signal(self, data=None, dtype=None):
# return super()._get_navigation_signal(data=data, dtype=dtype).as_lazy()
# _get_navigation_signal.__doc__ = BaseSignal._get_navigation_signal.__doc__
# def _get_signal_signal(self, data=None, dtype=None):
# return super()._get_signal_signal(data=data, dtype=dtype).as_lazy()
# _get_signal_signal.__doc__ = BaseSignal._get_signal_signal.__doc__
def _calculate_summary_statistics(self, rechunk=True):
if rechunk is True:
# Use dask auto rechunk instead of HyperSpy's one, what should be
# better for these operations
rechunk = "dask_auto"
data = self._lazy_data(rechunk=rechunk)
_raveled = data.ravel()
_mean, _std, _min, _q1, _q2, _q3, _max = da.compute(
da.nanmean(data),
da.nanstd(data),
da.nanmin(data),
da.percentile(_raveled, [25, ]),
da.percentile(_raveled, [50, ]),
da.percentile(_raveled, [75, ]),
da.nanmax(data), )
return _mean, _std, _min, _q1, _q2, _q3, _max
def _map_all(self, function, inplace=True, **kwargs):
calc_result = dd(function)(self.data, **kwargs)
if inplace:
self.data = da.from_delayed(calc_result, shape=self.data.shape,
dtype=self.data.dtype)
return None
return self._deepcopy_with_new_data(calc_result)
def _map_iterate(self,
function,
iterating_kwargs=(),
show_progressbar=None,
parallel=None,
max_workers=None,
ragged=None,
inplace=True,
**kwargs):
if ragged not in (True, False):
raise ValueError('"ragged" kwarg has to be bool for lazy signals')
_logger.debug("Entering '_map_iterate'")
size = max(1, self.axes_manager.navigation_size)
from hyperspy.misc.utils import (create_map_objects,
map_result_construction)
func, iterators = create_map_objects(function, size, iterating_kwargs,
**kwargs)
iterators = (self._iterate_signal(), ) + iterators
res_shape = self.axes_manager._navigation_shape_in_array
# no navigation
if not len(res_shape) and ragged:
res_shape = (1,)
all_delayed = [dd(func)(data) for data in zip(*iterators)]
if ragged:
if inplace:
raise ValueError("In place computation is not compatible with "
"ragged array for lazy signal.")
            # Shape of the signal dimension will change for each nav.
# index, which means we can't predict the shape and the dtype needs
# to be python object to support numpy ragged array
sig_shape = ()
sig_dtype = np.dtype('O')
else:
one_compute = all_delayed[0].compute()
# No signal dimension for scalar
if np.isscalar(one_compute):
sig_shape = ()
sig_dtype = type(one_compute)
else:
sig_shape = one_compute.shape
sig_dtype = one_compute.dtype
pixels = [
da.from_delayed(
res, shape=sig_shape, dtype=sig_dtype) for res in all_delayed
]
if ragged:
if show_progressbar is None:
from hyperspy.defaults_parser import preferences
show_progressbar = preferences.General.show_progressbar
            # We compute here because it is not certain that a ragged dask
            # array can be built: we would need to provide a chunk size...
res_data = np.empty(res_shape, dtype=sig_dtype)
_logger.info("Lazy signal is computed to make the ragged array.")
if show_progressbar:
cm = ProgressBar
else:
cm = dummy_context_manager
with cm():
try:
for i, pixel in enumerate(pixels):
res_data.flat[i] = pixel.compute()
except MemoryError:
raise MemoryError("The use of 'ragged' array requires the "
"computation of the lazy signal.")
else:
if len(pixels) > 0:
for step in reversed(res_shape):
_len = len(pixels)
starts = range(0, _len, step)
ends = range(step, _len + step, step)
pixels = [
da.stack(
pixels[s:e], axis=0) for s, e in zip(starts, ends)
]
res_data = pixels[0]
res = map_result_construction(
self, inplace, res_data, ragged, sig_shape, lazy=not ragged)
return res
def _iterate_signal(self):
if self.axes_manager.navigation_size < 2:
yield self()
return
nav_dim = self.axes_manager.navigation_dimension
sig_dim = self.axes_manager.signal_dimension
nav_indices = self.axes_manager.navigation_indices_in_array[::-1]
nav_lengths = np.atleast_1d(
np.array(self.data.shape)[list(nav_indices)])
getitem = [slice(None)] * (nav_dim + sig_dim)
data = self._lazy_data()
for indices in product(*[range(l) for l in nav_lengths]):
for res, ind in zip(indices, nav_indices):
getitem[ind] = res
yield data[tuple(getitem)]
def _block_iterator(self,
flat_signal=True,
get=threaded.get,
navigation_mask=None,
signal_mask=None):
"""A function that allows iterating lazy signal data by blocks,
defining the dask.Array.
Parameters
----------
flat_signal: bool
returns each block flattened, such that the shape (for the
particular block) is (navigation_size, signal_size), with
optionally masked elements missing. If false, returns
the equivalent of s.inav[{blocks}].data, where masked elements are
set to np.nan or 0.
get : dask scheduler
the dask scheduler to use for computations;
default `dask.threaded.get`
navigation_mask : {BaseSignal, numpy array, dask array}
The navigation locations marked as True are not returned (flat) or
set to NaN or 0.
signal_mask : {BaseSignal, numpy array, dask array}
The signal locations marked as True are not returned (flat) or set
to NaN or 0.
"""
self._make_lazy()
data = self._data_aligned_with_axes
nav_chunks = data.chunks[:self.axes_manager.navigation_dimension]
indices = product(*[range(len(c)) for c in nav_chunks])
signalsize = self.axes_manager.signal_size
sig_reshape = (signalsize,) if signalsize else ()
data = data.reshape((self.axes_manager.navigation_shape[::-1] +
sig_reshape))
if signal_mask is None:
signal_mask = slice(None) if flat_signal else \
np.zeros(self.axes_manager.signal_size, dtype='bool')
else:
try:
signal_mask = to_array(signal_mask).ravel()
except ValueError:
# re-raise with a message
raise ValueError("signal_mask has to be a signal, numpy or"
" dask array, but "
"{} was given".format(type(signal_mask)))
if flat_signal:
signal_mask = ~signal_mask
if navigation_mask is None:
nav_mask = da.zeros(
self.axes_manager.navigation_shape[::-1],
chunks=nav_chunks,
dtype='bool')
else:
try:
nav_mask = to_array(navigation_mask, chunks=nav_chunks)
except ValueError:
# re-raise with a message
raise ValueError("navigation_mask has to be a signal, numpy or"
" dask array, but "
"{} was given".format(type(navigation_mask)))
if flat_signal:
nav_mask = ~nav_mask
for ind in indices:
chunk = get(data.dask,
(data.name, ) + ind + (0,) * bool(signalsize))
n_mask = get(nav_mask.dask, (nav_mask.name, ) + ind)
if flat_signal:
yield chunk[n_mask, ...][..., signal_mask]
else:
chunk = chunk.copy()
value = np.nan if np.can_cast('float', chunk.dtype) else 0
chunk[n_mask, ...] = value
chunk[..., signal_mask] = value
yield chunk.reshape(chunk.shape[:-1] +
self.axes_manager.signal_shape[::-1])
def decomposition(
self,
normalize_poissonian_noise=False,
algorithm="SVD",
output_dimension=None,
signal_mask=None,
navigation_mask=None,
get=threaded.get,
num_chunks=None,
reproject=True,
print_info=True,
**kwargs
):
"""Perform Incremental (Batch) decomposition on the data.
The results are stored in ``self.learning_results``.
Read more in the :ref:`User Guide <big_data.decomposition>`.
Parameters
----------
normalize_poissonian_noise : bool, default False
If True, scale the signal to normalize Poissonian noise using
the approach described in [KeenanKotula2004]_.
algorithm : {'SVD', 'PCA', 'ORPCA', 'ORNMF'}, default 'SVD'
The decomposition algorithm to use.
output_dimension : int or None, default None
Number of components to keep/calculate. If None, keep all
(only valid for 'SVD' algorithm)
get : dask scheduler
the dask scheduler to use for computations;
default `dask.threaded.get`
num_chunks : int or None, default None
the number of dask chunks to pass to the decomposition model.
More chunks require more memory, but should run faster. Will be
increased to contain at least ``output_dimension`` signals.
navigation_mask : {BaseSignal, numpy array, dask array}
The navigation locations marked as True are not used in the
decomposition.
signal_mask : {BaseSignal, numpy array, dask array}
The signal locations marked as True are not used in the
decomposition.
reproject : bool, default True
Reproject data on the learnt components (factors) after learning.
print_info : bool, default True
If True, print information about the decomposition being performed.
In the case of sklearn.decomposition objects, this includes the
values of all arguments of the chosen sklearn algorithm.
**kwargs
passed to the partial_fit/fit functions.
References
----------
.. [KeenanKotula2004] M. Keenan and P. Kotula, "Accounting for Poisson noise
in the multivariate analysis of ToF-SIMS spectrum images", Surf.
Interface Anal 36(3) (2004): 203-212.
See Also
--------
* :py:meth:`~.learn.mva.MVA.decomposition` for non-lazy signals
* :py:func:`dask.array.linalg.svd`
* :py:class:`sklearn.decomposition.IncrementalPCA`
* :py:class:`~.learn.rpca.ORPCA`
* :py:class:`~.learn.ornmf.ORNMF`
"""
if kwargs.get("bounds", False):
warnings.warn(
"The `bounds` keyword is deprecated and will be removed "
"in v2.0. Since version > 1.3 this has no effect.",
VisibleDeprecationWarning,
)
kwargs.pop("bounds", None)
# Deprecate 'ONMF' for 'ORNMF'
if algorithm == "ONMF":
warnings.warn(
"The argument `algorithm='ONMF'` has been deprecated and will "
"be removed in future. Please use `algorithm='ORNMF'` instead.",
VisibleDeprecationWarning,
)
algorithm = "ORNMF"
# Check algorithms requiring output_dimension
algorithms_require_dimension = ["PCA", "ORPCA", "ORNMF"]
if algorithm in algorithms_require_dimension and output_dimension is None:
raise ValueError(
"`output_dimension` must be specified for '{}'".format(algorithm)
)
explained_variance = None
explained_variance_ratio = None
_al_data = self._data_aligned_with_axes
nav_chunks = _al_data.chunks[: self.axes_manager.navigation_dimension]
sig_chunks = _al_data.chunks[self.axes_manager.navigation_dimension :]
num_chunks = 1 if num_chunks is None else num_chunks
blocksize = np.min([multiply(ar) for ar in product(*nav_chunks)])
nblocks = multiply([len(c) for c in nav_chunks])
if output_dimension and blocksize / output_dimension < num_chunks:
num_chunks = np.ceil(blocksize / output_dimension)
blocksize *= num_chunks
# Initialize return_info and print_info
to_return = None
to_print = [
"Decomposition info:",
" normalize_poissonian_noise={}".format(normalize_poissonian_noise),
" algorithm={}".format(algorithm),
" output_dimension={}".format(output_dimension)
]
# LEARN
if algorithm == "PCA":
if not import_sklearn.sklearn_installed:
raise ImportError("algorithm='PCA' requires scikit-learn")
obj = import_sklearn.sklearn.decomposition.IncrementalPCA(n_components=output_dimension)
method = partial(obj.partial_fit, **kwargs)
reproject = True
to_print.extend(["scikit-learn estimator:", obj])
elif algorithm == "ORPCA":
from hyperspy.learn.rpca import ORPCA
batch_size = kwargs.pop("batch_size", None)
obj = ORPCA(output_dimension, **kwargs)
method = partial(obj.fit, batch_size=batch_size)
elif algorithm == "ORNMF":
from hyperspy.learn.ornmf import ORNMF
batch_size = kwargs.pop("batch_size", None)
obj = ORNMF(output_dimension, **kwargs)
method = partial(obj.fit, batch_size=batch_size)
elif algorithm != "SVD":
raise ValueError("'algorithm' not recognised")
original_data = self.data
try:
_logger.info("Performing decomposition analysis")
if normalize_poissonian_noise:
_logger.info("Scaling the data to normalize Poissonian noise")
data = self._data_aligned_with_axes
ndim = self.axes_manager.navigation_dimension
sdim = self.axes_manager.signal_dimension
nm = da.logical_not(
da.zeros(self.axes_manager.navigation_shape[::-1], chunks=nav_chunks)
if navigation_mask is None
else to_array(navigation_mask, chunks=nav_chunks)
)
sm = da.logical_not(
da.zeros(self.axes_manager.signal_shape[::-1], chunks=sig_chunks)
if signal_mask is None
else to_array(signal_mask, chunks=sig_chunks)
)
ndim = self.axes_manager.navigation_dimension
sdim = self.axes_manager.signal_dimension
bH, aG = da.compute(
data.sum(axis=tuple(range(ndim))),
data.sum(axis=tuple(range(ndim, ndim + sdim))),
)
bH = da.where(sm, bH, 1)
aG = da.where(nm, aG, 1)
raG = da.sqrt(aG)
rbH = da.sqrt(bH)
coeff = raG[(...,) + (None,) * rbH.ndim] * rbH[(None,) * raG.ndim + (...,)]
                coeff = coeff.map_blocks(np.nan_to_num)
coeff = da.where(coeff == 0, 1, coeff)
data = data / coeff
self.data = data
# LEARN
if algorithm == "SVD":
reproject = False
from dask.array.linalg import svd
try:
self._unfolded4decomposition = self.unfold()
# TODO: implement masking
if navigation_mask or signal_mask:
raise NotImplementedError("Masking is not yet implemented for lazy SVD")
U, S, V = svd(self.data)
if output_dimension is None:
min_shape = min(min(U.shape), min(V.shape))
else:
min_shape = output_dimension
U = U[:, :min_shape]
S = S[:min_shape]
V = V[:min_shape]
factors = V.T
explained_variance = S ** 2 / self.data.shape[0]
loadings = U * S
finally:
if self._unfolded4decomposition is True:
self.fold()
                        self._unfolded4decomposition = False
else:
this_data = []
try:
for chunk in progressbar(
self._block_iterator(
flat_signal=True,
get=get,
signal_mask=signal_mask,
navigation_mask=navigation_mask,
),
total=nblocks,
leave=True,
desc="Learn",
):
this_data.append(chunk)
if len(this_data) == num_chunks:
thedata = np.concatenate(this_data, axis=0)
method(thedata)
this_data = []
if len(this_data):
thedata = np.concatenate(this_data, axis=0)
method(thedata)
except KeyboardInterrupt: # pragma: no cover
pass
# GET ALREADY CALCULATED RESULTS
if algorithm == "PCA":
explained_variance = obj.explained_variance_
explained_variance_ratio = obj.explained_variance_ratio_
factors = obj.components_.T
elif algorithm == "ORPCA":
factors, loadings = obj.finish()
loadings = loadings.T
elif algorithm == "ORNMF":
factors, loadings = obj.finish()
loadings = loadings.T
# REPROJECT
if reproject:
if algorithm == "PCA":
method = obj.transform
def post(a):
return np.concatenate(a, axis=0)
elif algorithm == "ORPCA":
method = obj.project
def post(a):
return np.concatenate(a, axis=1).T
elif algorithm == "ORNMF":
method = obj.project
def post(a):
return np.concatenate(a, axis=1).T
_map = map(
lambda thing: method(thing),
self._block_iterator(
flat_signal=True,
get=get,
signal_mask=signal_mask,
navigation_mask=navigation_mask,
),
)
H = []
try:
for thing in progressbar(_map, total=nblocks, desc="Project"):
H.append(thing)
except KeyboardInterrupt: # pragma: no cover
pass
loadings = post(H)
if explained_variance is not None and explained_variance_ratio is None:
explained_variance_ratio = explained_variance / explained_variance.sum()
# RESHUFFLE "blocked" LOADINGS
ndim = self.axes_manager.navigation_dimension
if algorithm != "SVD": # Only needed for online algorithms
try:
loadings = _reshuffle_mixed_blocks(
loadings, ndim, (output_dimension,), nav_chunks
).reshape((-1, output_dimension))
except ValueError:
# In case the projection step was not finished, it's left
# as scrambled
pass
finally:
self.data = original_data
target = self.learning_results
target.decomposition_algorithm = algorithm
target.output_dimension = output_dimension
if algorithm != "SVD":
target._object = obj
target.factors = factors
target.loadings = loadings
target.explained_variance = explained_variance
target.explained_variance_ratio = explained_variance_ratio
# Rescale the results if the noise was normalized
if normalize_poissonian_noise is True:
target.factors = target.factors * rbH.ravel()[:, np.newaxis]
target.loadings = target.loadings * raG.ravel()[:, np.newaxis]
# Print details about the decomposition we just performed
if print_info:
print("\n".join([str(pr) for pr in to_print]))
def _reshuffle_mixed_blocks(array, ndim, sshape, nav_chunks):
"""Reshuffles dask block-shuffled array
Parameters
----------
array : np.ndarray
the array to reshuffle
ndim : int
the number of navigation (shuffled) dimensions
sshape : tuple of ints
The shape
"""
splits = np.cumsum([multiply(ar)
for ar in product(*nav_chunks)][:-1]).tolist()
if splits:
all_chunks = [
ar.reshape(shape + sshape)
for shape, ar in zip(
product(*nav_chunks), np.split(array, splits))
]
def split_stack_list(what, step, axis):
total = len(what)
if total != step:
return [
np.concatenate(
what[i:i + step], axis=axis)
for i in range(0, total, step)
]
else:
return np.concatenate(what, axis=axis)
for chunks, axis in zip(nav_chunks[::-1], range(ndim - 1, -1, -1)):
step = len(chunks)
all_chunks = split_stack_list(all_chunks, step, axis)
return all_chunks
else:
return array
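# -----------------------------------------------------------------------------
# Illustrative sketch (an addition for clarity, not upstream HyperSpy code):
# the Poissonian-noise normalisation inside ``decomposition`` divides the data
# by the outer product sqrt(pixel_sums) * sqrt(channel_sums), following
# Keenan & Kotula (2004).  A minimal dense NumPy version of that scaling for a
# 2D (navigation, signal) matrix, for reference only.
def _example_poisson_scaling(data2d):
    aG = data2d.sum(axis=1)                      # per-pixel totals
    bH = data2d.sum(axis=0)                      # per-channel totals
    coeff = np.sqrt(aG)[:, None] * np.sqrt(bH)[None, :]
    coeff[coeff == 0] = 1                        # avoid division by zero
    return data2d / coeff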
| gpl-3.0 |
stefan-contiu/ml-crops | RandomForest/dt_ml_RandomForest.py | 1 | 1609 | __author__ = 'Stefan Contiu'
# RESULTS LOG : July 20th, 2015
# Accuracy : 0.993837304848
# Confusion Matrix :
# [[3044 11 2 1]
# [ 2 83 0 0]
# [ 0 0 766 13]
# [ 0 0 1 945]]
from time import time
###############################################
# load from csv training and testing sets
from numpy import genfromtxt
features_test = genfromtxt('d:/CODE/ml-crops/preproc/dataset/features_train.csv', delimiter=',')
classes_test = genfromtxt('d:/CODE/ml-crops/preproc/dataset/classes_train.csv', delimiter=',')
features_train = genfromtxt('d:/CODE/ml-crops/preproc/dataset/features_test.csv', delimiter=',')
classes_train = genfromtxt('d:/CODE/ml-crops/preproc/dataset/classes_test.csv', delimiter=',')
###############################################
# perform Random Forest classification
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
fit_start_time = time()
clf.fit(features_train, classes_train)
fit_end_time = time()
print "\nTraining time : ", round(fit_end_time - fit_start_time, 3), "s"
###############################################
# predict
predict_start_time = time()
classes_predicted = clf.predict(features_test)
predict_end_time = time()
print "Preciting time : ", round(predict_end_time - predict_start_time, 3), "s"
###############################################
# get accuracy
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
print "\nAccuracy : ", accuracy_score(classes_test, classes_predicted)
print "Confusion Matrix : \n", confusion_matrix(classes_test, classes_predicted) | mit |
harisbal/pandas | pandas/io/sas/sas_xport.py | 2 | 14714 | """
Read a SAS XPort format file into a Pandas DataFrame.
Based on code from Jack Cushman (github.com/jcushman/xport).
The file format is defined here:
https://support.sas.com/techsup/technote/ts140.pdf
"""
from datetime import datetime
import struct
import warnings
import numpy as np
from pandas.util._decorators import Appender
import pandas as pd
from pandas import compat
from pandas.io.common import BaseIterator, get_filepath_or_buffer
_correct_line1 = ("HEADER RECORD*******LIBRARY HEADER RECORD!!!!!!!"
"000000000000000000000000000000 ")
_correct_header1 = ("HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!"
"000000000000000001600000000")
_correct_header2 = ("HEADER RECORD*******DSCRPTR HEADER RECORD!!!!!!!"
"000000000000000000000000000000 ")
_correct_obs_header = ("HEADER RECORD*******OBS HEADER RECORD!!!!!!!"
"000000000000000000000000000000 ")
_fieldkeys = ['ntype', 'nhfun', 'field_length', 'nvar0', 'name', 'label',
'nform', 'nfl', 'num_decimals', 'nfj', 'nfill', 'niform',
'nifl', 'nifd', 'npos', '_']
_base_params_doc = """\
Parameters
----------
filepath_or_buffer : string or file-like object
Path to SAS file or object implementing binary read method."""
_params2_doc = """\
index : identifier of index column
Identifier of column that should be used as index of the DataFrame.
encoding : string
Encoding for text data.
chunksize : int
Read file `chunksize` lines at a time, returns iterator."""
_format_params_doc = """\
format : string
File format, only `xport` is currently supported."""
_iterator_doc = """\
iterator : boolean, default False
Return XportReader object for reading file incrementally."""
_read_sas_doc = """Read a SAS file into a DataFrame.
%(_base_params_doc)s
%(_format_params_doc)s
%(_params2_doc)s
%(_iterator_doc)s
Returns
-------
DataFrame or XportReader
Examples
--------
Read a SAS Xport file:
>>> df = pd.read_sas('filename.XPT')
Read a Xport file in 10,000 line chunks:
>>> itr = pd.read_sas('filename.XPT', chunksize=10000)
>>> for chunk in itr:
>>> do_something(chunk)
""" % {"_base_params_doc": _base_params_doc,
"_format_params_doc": _format_params_doc,
"_params2_doc": _params2_doc,
"_iterator_doc": _iterator_doc}
_xport_reader_doc = """\
Class for reading SAS Xport files.
%(_base_params_doc)s
%(_params2_doc)s
Attributes
----------
member_info : list
Contains information about the file
fields : list
Contains information about the variables in the file
""" % {"_base_params_doc": _base_params_doc,
"_params2_doc": _params2_doc}
_read_method_doc = """\
Read observations from SAS Xport file, returning as data frame.
Parameters
----------
nrows : int
Number of rows to read from data file; if None, read whole
file.
Returns
-------
A DataFrame.
"""
def _parse_date(datestr):
""" Given a date in xport format, return Python date. """
try:
# e.g. "16FEB11:10:07:55"
return datetime.strptime(datestr, "%d%b%y:%H:%M:%S")
except ValueError:
return pd.NaT
def _split_line(s, parts):
"""
Parameters
----------
s: string
Fixed-length string to split
parts: list of (name, length) pairs
Used to break up string, name '_' will be filtered from output.
Returns
-------
Dict of name:contents of string at given location.
"""
out = {}
start = 0
for name, length in parts:
out[name] = s[start:start + length].strip()
start += length
del out['_']
return out
def _handle_truncated_float_vec(vec, nbytes):
# This feature is not well documented, but some SAS XPORT files
# have 2-7 byte "truncated" floats. To read these truncated
# floats, pad them with zeros on the right to make 8 byte floats.
#
# References:
# https://github.com/jcushman/xport/pull/3
# The R "foreign" library
if nbytes != 8:
vec1 = np.zeros(len(vec), np.dtype('S8'))
dtype = np.dtype('S%d,S%d' % (nbytes, 8 - nbytes))
vec2 = vec1.view(dtype=dtype)
vec2['f0'] = vec
return vec2
return vec
def _parse_float_vec(vec):
"""
Parse a vector of float values representing IBM 8 byte floats into
native 8 byte floats.
"""
dtype = np.dtype('>u4,>u4')
vec1 = vec.view(dtype=dtype)
xport1 = vec1['f0']
xport2 = vec1['f1']
# Start by setting first half of ieee number to first half of IBM
# number sans exponent
ieee1 = xport1 & 0x00ffffff
# The fraction bit to the left of the binary point in the ieee
# format was set and the number was shifted 0, 1, 2, or 3
# places. This will tell us how to adjust the ibm exponent to be a
# power of 2 ieee exponent and how to shift the fraction bits to
# restore the correct magnitude.
shift = np.zeros(len(vec), dtype=np.uint8)
shift[np.where(xport1 & 0x00200000)] = 1
shift[np.where(xport1 & 0x00400000)] = 2
shift[np.where(xport1 & 0x00800000)] = 3
# shift the ieee number down the correct number of places then
# set the second half of the ieee number to be the second half
# of the ibm number shifted appropriately, ored with the bits
# from the first half that would have been shifted in if we
# could shift a double. All we are worried about are the low
# order 3 bits of the first half since we're only shifting by
# 1, 2, or 3.
ieee1 >>= shift
ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift)))
# clear the 1 bit to the left of the binary point
ieee1 &= 0xffefffff
# set the exponent of the ieee number to be the actual exponent
# plus the shift count + 1023. Or this into the first half of the
# ieee number. The ibm exponent is excess 64 but is adjusted by 65
# since during conversion to ibm format the exponent is
# incremented by 1 and the fraction bits left 4 positions to the
# right of the radix point. (had to add >> 24 because C treats &
# 0x7f as 0x7f000000 and Python doesn't)
ieee1 |= ((((((xport1 >> 24) & 0x7f) - 65) << 2) +
shift + 1023) << 20) | (xport1 & 0x80000000)
ieee = np.empty((len(ieee1),), dtype='>u4,>u4')
ieee['f0'] = ieee1
ieee['f1'] = ieee2
ieee = ieee.view(dtype='>f8')
ieee = ieee.astype('f8')
return ieee
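# -----------------------------------------------------------------------------
# Illustrative sketch (an addition for clarity, not pandas code).
# 0x4110000000000000 is the IBM hexadecimal-float encoding of 1.0
# (exponent 65 - 64 = 1, fraction 1/16), so the converter above should map it
# to exactly 1.0.  The byte literal is the only assumption.
def _example_parse_ibm_one():
    vec = np.frombuffer(b"\x41\x10\x00\x00\x00\x00\x00\x00", dtype="S8")
    return _parse_float_vec(vec)[0]   # expected: 1.0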
class XportReader(BaseIterator):
__doc__ = _xport_reader_doc
def __init__(self, filepath_or_buffer, index=None, encoding='ISO-8859-1',
chunksize=None):
self._encoding = encoding
self._lines_read = 0
self._index = index
self._chunksize = chunksize
if isinstance(filepath_or_buffer, str):
(filepath_or_buffer, encoding,
compression, should_close) = get_filepath_or_buffer(
filepath_or_buffer, encoding=encoding)
if isinstance(filepath_or_buffer, (str, compat.text_type, bytes)):
self.filepath_or_buffer = open(filepath_or_buffer, 'rb')
else:
# Copy to BytesIO, and ensure no encoding
contents = filepath_or_buffer.read()
try:
contents = contents.encode(self._encoding)
except UnicodeEncodeError:
pass
self.filepath_or_buffer = compat.BytesIO(contents)
self._read_header()
def close(self):
self.filepath_or_buffer.close()
def _get_row(self):
return self.filepath_or_buffer.read(80).decode()
def _read_header(self):
self.filepath_or_buffer.seek(0)
# read file header
line1 = self._get_row()
if line1 != _correct_line1:
self.close()
raise ValueError("Header record is not an XPORT file.")
line2 = self._get_row()
fif = [['prefix', 24], ['version', 8], ['OS', 8],
['_', 24], ['created', 16]]
file_info = _split_line(line2, fif)
if file_info['prefix'] != "SAS SAS SASLIB":
self.close()
raise ValueError("Header record has invalid prefix.")
file_info['created'] = _parse_date(file_info['created'])
self.file_info = file_info
line3 = self._get_row()
file_info['modified'] = _parse_date(line3[:16])
# read member header
header1 = self._get_row()
header2 = self._get_row()
headflag1 = header1.startswith(_correct_header1)
headflag2 = (header2 == _correct_header2)
if not (headflag1 and headflag2):
self.close()
raise ValueError("Member header not found")
# usually 140, could be 135
fieldnamelength = int(header1[-5:-2])
# member info
mem = [['prefix', 8], ['set_name', 8], ['sasdata', 8],
['version', 8], ['OS', 8], ['_', 24], ['created', 16]]
member_info = _split_line(self._get_row(), mem)
mem = [['modified', 16], ['_', 16], ['label', 40], ['type', 8]]
member_info.update(_split_line(self._get_row(), mem))
member_info['modified'] = _parse_date(member_info['modified'])
member_info['created'] = _parse_date(member_info['created'])
self.member_info = member_info
# read field names
types = {1: 'numeric', 2: 'char'}
fieldcount = int(self._get_row()[54:58])
datalength = fieldnamelength * fieldcount
# round up to nearest 80
if datalength % 80:
datalength += 80 - datalength % 80
fielddata = self.filepath_or_buffer.read(datalength)
fields = []
obs_length = 0
while len(fielddata) >= fieldnamelength:
# pull data for one field
field, fielddata = (fielddata[:fieldnamelength],
fielddata[fieldnamelength:])
# rest at end gets ignored, so if field is short, pad out
# to match struct pattern below
field = field.ljust(140)
fieldstruct = struct.unpack('>hhhh8s40s8shhh2s8shhl52s', field)
field = dict(zip(_fieldkeys, fieldstruct))
del field['_']
field['ntype'] = types[field['ntype']]
fl = field['field_length']
if field['ntype'] == 'numeric' and ((fl < 2) or (fl > 8)):
self.close()
msg = "Floating field width {0} is not between 2 and 8."
raise TypeError(msg.format(fl))
for k, v in field.items():
try:
field[k] = v.strip()
except AttributeError:
pass
obs_length += field['field_length']
fields += [field]
header = self._get_row()
if not header == _correct_obs_header:
self.close()
raise ValueError("Observation header not found.")
self.fields = fields
self.record_length = obs_length
self.record_start = self.filepath_or_buffer.tell()
self.nobs = self._record_count()
self.columns = [x['name'].decode() for x in self.fields]
# Setup the dtype.
dtypel = []
for i, field in enumerate(self.fields):
dtypel.append(('s' + str(i), "S" + str(field['field_length'])))
dtype = np.dtype(dtypel)
self._dtype = dtype
def __next__(self):
return self.read(nrows=self._chunksize or 1)
def _record_count(self):
"""
Get number of records in file.
This is maybe suboptimal because we have to seek to the end of
the file.
Side effect: returns file position to record_start.
"""
self.filepath_or_buffer.seek(0, 2)
total_records_length = (self.filepath_or_buffer.tell() -
self.record_start)
if total_records_length % 80 != 0:
warnings.warn("xport file may be corrupted")
if self.record_length > 80:
self.filepath_or_buffer.seek(self.record_start)
return total_records_length // self.record_length
self.filepath_or_buffer.seek(-80, 2)
last_card = self.filepath_or_buffer.read(80)
last_card = np.frombuffer(last_card, dtype=np.uint64)
# 8 byte blank
ix = np.flatnonzero(last_card == 2314885530818453536)
if len(ix) == 0:
tail_pad = 0
else:
tail_pad = 8 * len(ix)
self.filepath_or_buffer.seek(self.record_start)
return (total_records_length - tail_pad) // self.record_length
def get_chunk(self, size=None):
"""
Reads lines from Xport file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, reads whole file.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
def _missing_double(self, vec):
v = vec.view(dtype='u1,u1,u2,u4')
miss = (v['f1'] == 0) & (v['f2'] == 0) & (v['f3'] == 0)
miss1 = (((v['f0'] >= 0x41) & (v['f0'] <= 0x5a)) |
(v['f0'] == 0x5f) | (v['f0'] == 0x2e))
miss &= miss1
return miss
@Appender(_read_method_doc)
def read(self, nrows=None):
if nrows is None:
nrows = self.nobs
read_lines = min(nrows, self.nobs - self._lines_read)
read_len = read_lines * self.record_length
if read_len <= 0:
self.close()
raise StopIteration
raw = self.filepath_or_buffer.read(read_len)
data = np.frombuffer(raw, dtype=self._dtype, count=read_lines)
df = pd.DataFrame(index=range(read_lines))
for j, x in enumerate(self.columns):
vec = data['s%d' % j]
ntype = self.fields[j]['ntype']
if ntype == "numeric":
vec = _handle_truncated_float_vec(
vec, self.fields[j]['field_length'])
miss = self._missing_double(vec)
v = _parse_float_vec(vec)
v[miss] = np.nan
elif self.fields[j]['ntype'] == 'char':
v = [y.rstrip() for y in vec]
if compat.PY3:
if self._encoding is not None:
v = [y.decode(self._encoding) for y in v]
df[x] = v
if self._index is None:
df.index = range(self._lines_read, self._lines_read + read_lines)
else:
df = df.set_index(self._index)
self._lines_read += read_lines
return df
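# -----------------------------------------------------------------------------
# Illustrative sketch (an addition for clarity, not pandas code): typical ways
# to drive XportReader directly; "demo.xpt" is a placeholder path.
def _example_read_xport(path="demo.xpt"):
    reader = XportReader(path)
    full = reader.read()                               # whole file as one DataFrame
    reader.close()
    chunks = list(XportReader(path, chunksize=1000))   # or 1000-row pieces
    return full, chunks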
| bsd-3-clause |
bikash/h2o-dev | py2/h2o_cmd.py | 1 | 16424 |
import h2o_nodes
from h2o_test import dump_json, verboseprint
import h2o_util
import h2o_print as h2p
from h2o_test import OutputObj
#************************************************************************
def runStoreView(node=None, **kwargs):
if not node: node = h2o_nodes.nodes[0]
print "\nStoreView:"
# FIX! are there keys other than frames and models
a = node.frames(**kwargs)
# print "storeview frames:", dump_json(a)
frameList = [af['key']['name'] for af in a['frames']]
for f in frameList:
print "frame:", f
print "# of frames:", len(frameList)
b = node.models()
# print "storeview models:", dump_json(b)
modelList = [bm['key'] for bm in b['models']]
for m in modelList:
print "model:", m
print "# of models:", len(modelList)
return {'keys': frameList + modelList}
#************************************************************************
def runExec(node=None, **kwargs):
if not node: node = h2o_nodes.nodes[0]
a = node.rapids(**kwargs)
return a
def runInspect(node=None, key=None, verbose=False, **kwargs):
if not key: raise Exception('No key for Inspect')
if not node: node = h2o_nodes.nodes[0]
a = node.frames(key, **kwargs)
if verbose:
print "inspect of %s:" % key, dump_json(a)
return a
#************************************************************************
def infoFromParse(parse):
if not parse:
raise Exception("parse is empty for infoFromParse")
# assumes just one result from Frames
if 'frames' not in parse:
raise Exception("infoFromParse expects parse= param from parse result: %s" % parse)
if len(parse['frames'])!=1:
raise Exception("infoFromParse expects parse= param from parse result: %s " % parse['frames'])
# it it index[0] or key '0' in a dictionary?
frame = parse['frames'][0]
# need more info about this dataset for debug
numCols = len(frame['columns'])
numRows = frame['rows']
key_name = frame['key']['name']
return numRows, numCols, key_name
#************************************************************************
# make this be the basic way to get numRows, numCols
def infoFromInspect(inspect):
if not inspect:
raise Exception("inspect is empty for infoFromInspect")
# assumes just one result from Frames
if 'frames' not in inspect:
raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s" % inspect)
if len(inspect['frames'])!=1:
raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s " % inspect['frames'])
# it it index[0] or key '0' in a dictionary?
frame = inspect['frames'][0]
# need more info about this dataset for debug
columns = frame['columns']
key_name = frame['key']['name']
missingList = []
labelList = []
typeList = []
for i, colDict in enumerate(columns): # columns is a list
if 'missing_count' not in colDict:
# debug
print "\ncolDict"
for k in colDict:
print " key: %s" % k
# data
# domain
# string_data
# type
# label
# percentiles
# precision
# mins
# maxs
# mean
# histogram_base
# histogram_bins
# histogram_stride
# zero_count
# missing_count
# positive_infinity_count
# negative_infinity_count
# __meta
mins = colDict['mins']
maxs = colDict['maxs']
missing = colDict['missing_count']
label = colDict['label']
stype = colDict['type']
missingList.append(missing)
labelList.append(label)
typeList.append(stype)
if missing!=0:
print "%s: col: %s %s, missing: %d" % (key_name, i, label, missing)
print "inspect typeList:", typeList
# make missingList empty if all 0's
if sum(missingList)==0:
missingList = []
# no type per col in inspect2
numCols = len(frame['columns'])
numRows = frame['rows']
print "\n%s numRows: %s, numCols: %s" % (key_name, numRows, numCols)
return missingList, labelList, numRows, numCols
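#************************************************************************
# illustrative sketch (an addition for clarity, not part of the original
# harness): how the two info helpers above are typically combined right after
# a parse; parseResult is whatever the import/parse step returned.
def _example_info_helpers(parseResult):
    numRows, numCols, parse_key = infoFromParse(parseResult)
    missingList, labelList, numRows, numCols = infoFromInspect(runInspect(key=parse_key))
    return labelList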
#************************************************************************
# does all columns unless you specify column index.
# only will return first or specified column
def runSummary(node=None, key=None, column=None, expected=None, maxDelta=None, noPrint=False, **kwargs):
if not key: raise Exception('No key for Summary')
if not node: node = h2o_nodes.nodes[0]
# return node.summary(key, **kwargs)
i = InspectObj(key=key)
# just so I don't have to change names below
missingList = i.missingList
labelList = i.labelList
numRows = i.numRows
numCols = i.numCols
print "labelList:", labelList
assert labelList is not None
# doesn't take indices? only column labels?
# return first column, unless specified
if not (column is None or isinstance(column, (basestring, int))):
raise Exception("column param should be string or integer index or None %s %s" % (type(column), column))
# either return the first col, or the col indentified by label. the column identifed could be string or index?
if column is None: # means the summary json when we ask for col 0, will be what we return (do all though)
colNameToDo = labelList
colIndexToDo = range(len(labelList))
elif isinstance(column, int):
colNameToDo = [labelList[column]]
colIndexToDo = [column]
elif isinstance(column, basestring):
colNameToDo = [column]
if column not in labelList:
raise Exception("% not in labellist: %s" % (column, labellist))
colIndexToDo = [labelList.index(column)]
else:
raise Exception("wrong type %s for column %s" % (type(column), column))
# we get the first column as result after walking across all, if no column parameter
desiredResult = None
for (colIndex, colName) in zip(colIndexToDo, colNameToDo):
print "doing summary on %s %s" % (colIndex, colName)
# ugly looking up the colIndex
co = SummaryObj(key=key, colIndex=colIndex, colName=colName)
if not desiredResult:
desiredResult = co
if not noPrint:
for k,v in co:
# only print [0] of mins and maxs because of the e308 values when they don't have dataset values
if k=='mins' or k=='maxs':
print "%s[0]" % k, v[0]
else:
print k, v
if expected is not None:
print "len(co.histogram_bins):", len(co.histogram_bins)
print "co.label:", co.label, "mean (2 places):", h2o_util.twoDecimals(co.mean)
# what is precision. -1?
print "co.label:", co.label, "std dev. (2 places):", h2o_util.twoDecimals(co.sigma)
# print "FIX! hacking the co.percentiles because it's short by two"
# if co.percentiles:
# percentiles = [0] + co.percentiles + [0]
# else:
# percentiles = None
percentiles = co.percentiles
assert len(co.percentiles) == len(co.default_percentiles)
# the thresholds h2o used, should match what we expected
# expected = [0] * 5
# Fix. doesn't check for expected = 0?
# max of one bin
if maxDelta is None:
maxDelta = (co.maxs[0] - co.mins[0])/1000
if expected[0]: h2o_util.assertApproxEqual(co.mins[0], expected[0], tol=maxDelta,
msg='min is not approx. expected')
if expected[1]: h2o_util.assertApproxEqual(percentiles[2], expected[1], tol=maxDelta,
msg='25th percentile is not approx. expected')
if expected[2]: h2o_util.assertApproxEqual(percentiles[4], expected[2], tol=maxDelta,
msg='50th percentile (median) is not approx. expected')
if expected[3]: h2o_util.assertApproxEqual(percentiles[6], expected[3], tol=maxDelta,
msg='75th percentile is not approx. expected')
if expected[4]: h2o_util.assertApproxEqual(co.maxs[0], expected[4], tol=maxDelta,
msg='max is not approx. expected')
# figure out the expected max error
# use this for comparing to sklearn/sort
MAX_QBINS = 1000
if expected[0] and expected[4]:
expectedRange = expected[4] - expected[0]
                    # because of floor and ceil effects we potentially lose 2 bins (worst case)
                    # the extra bin for the max value is an extra bin..ignore
expectedBin = expectedRange/(MAX_QBINS-2)
maxErr = expectedBin # should we have some fuzz for fp?
else:
print "Test won't calculate max expected error"
maxErr = 0
pt = h2o_util.twoDecimals(percentiles)
                # only look at [0] for now...big e308 numbers if unpopulated due to not enough unique values in dataset column
mx = h2o_util.twoDecimals(co.maxs[0])
mn = h2o_util.twoDecimals(co.mins[0])
print "co.label:", co.label, "co.percentiles (2 places):", pt
print "co.default_percentiles:", co.default_percentiles
print "co.label:", co.label, "co.maxs: (2 places):", mx
print "co.label:", co.label, "co.mins: (2 places):", mn
# FIX! why would percentiles be None? enums?
if pt is None:
compareActual = mn, [None] * 3, mx
else:
compareActual = mn, pt[2], pt[4], pt[6], mx
h2p.green_print("actual min/25/50/75/max co.label:", co.label, "(2 places):", compareActual)
h2p.green_print("expected min/25/50/75/max co.label:", co.label, "(2 places):", expected)
return desiredResult
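#************************************************************************
# illustrative sketch (an addition for clarity, not part of the original
# harness): a typical runSummary call as used in tests; the key name and the
# expected min/25/50/75/max values are placeholders.
def _example_run_summary(hex_key='cars.hex'):
    co = runSummary(key=hex_key, column=0,
                    expected=[4.0, 44.0, 76.0, 108.0, 150.0], maxDelta=1.0)
    print "col:", co.label, "mean:", co.mean, "sigma:", co.sigma
    return co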
# this parses the json object returned for one col from runSummary...returns an OutputObj object
# summaryResult = h2o_cmd.runSummary(key=hex_key, column=0)
# co = h2o_cmd.infoFromSummary(summaryResult)
# print co.label
# legacy
def infoFromSummary(summaryResult, column=None):
return SummaryObj(summaryResult, column=column)
class ParseObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, parseResult, expectedNumRows=None, expectedNumCols=None, noPrint=False, **kwargs):
super(ParseObj, self).__init__(parseResult['frames'][0], "Parse", noPrint=noPrint)
# add my stuff
self.numRows, self.numCols, self.parse_key = infoFromParse(parseResult)
# h2o_import.py does this for test support
if 'python_elapsed' in parseResult:
self.python_elapsed = parseResult['python_elapsed']
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
print "ParseObj created for:", self.parse_key # vars(self)
# Let's experiment with creating new objects that are an api I control for generic operations (Inspect)
class InspectObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, key,
expectedNumRows=None, expectedNumCols=None, expectedMissingList=None, expectedLabelList=None,
noPrint=False, **kwargs):
inspectResult = runInspect(key=key)
super(InspectObj, self).__init__(inspectResult['frames'][0], "Inspect", noPrint=noPrint)
# add my stuff
self.missingList, self.labelList, self.numRows, self.numCols = infoFromInspect(inspectResult)
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
if expectedMissingList is not None:
            assert self.missingList == expectedMissingList, "%s %s" % (self.missingList, expectedMissingList)
if expectedLabelList is not None:
assert self.labelList == expectedLabelList, "%s %s" % (self.labelList, expectedLabelList)
print "InspectObj created for:", key #, vars(self)
class SummaryObj(OutputObj):
def check(self,
expectedNumRows=None, expectedNumCols=None,
expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
noPrint=False, **kwargs):
        if expectedLabel is not None:
            assert self.label == expectedLabel, "%s %s" % (self.label, expectedLabel)
        if expectedType is not None:
            assert self.type == expectedType, "%s %s" % (self.type, expectedType)
        if expectedMissing is not None:
            assert self.missing == expectedMissing, "%s %s" % (self.missing, expectedMissing)
        if expectedDomain is not None:
            assert self.domain == expectedDomain, "%s %s" % (self.domain, expectedDomain)
        if expectedBinsSum is not None:
            assert self.binsSum == expectedBinsSum, "%s %s" % (self.binsSum, expectedBinsSum)
# column is column name?
def __init__(self, key, colIndex, colName,
expectedNumRows=None, expectedNumCols=None,
expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
noPrint=False, timeoutSecs=30, **kwargs):
# we need both colInndex and colName for doing Summary efficiently
# ugly.
assert colIndex is not None
assert colName is not None
summaryResult = h2o_nodes.nodes[0].summary(key=key, column=colName, timeoutSecs=timeoutSecs, **kwargs)
# this should be the same for all the cols? Or does the checksum change?
frame = summaryResult['frames'][0]
default_percentiles = frame['default_percentiles']
checksum = frame['checksum']
rows = frame['rows']
# assert colIndex < len(frame['columns']), "You're asking for colIndex %s but there are only %s. " % \
# (colIndex, len(frame['columns']))
# coJson = frame['columns'][colIndex]
# is it always 0 now? the one I asked for ?
coJson = frame['columns'][0]
assert checksum !=0 and checksum is not None
assert rows!=0 and rows is not None
# FIX! why is frame['key'] = None here?
# assert frame['key'] == key, "%s %s" % (frame['key'], key)
super(SummaryObj, self).__init__(coJson, "Summary for %s" % colName, noPrint=noPrint)
# how are enums binned. Stride of 1? (what about domain values)
# touch all
# print "vars", vars(self)
coList = [
len(self.data),
self.domain,
self.string_data,
self.type,
self.label,
self.percentiles,
self.precision,
self.mins,
self.maxs,
self.mean,
self.histogram_base,
len(self.histogram_bins),
self.histogram_stride,
self.zero_count,
self.missing_count,
self.positive_infinity_count,
self.negative_infinity_count,
]
assert self.label==colName, "%s You must have told me the wrong colName %s for the given colIndex %s" % \
(self.label, colName, colIndex)
print "you can look at this attributes in the returned object (which is OutputObj if you assigned to 'co')"
for k,v in self:
print "%s" % k,
# hack these into the column object from the full summary
self.default_percentiles = default_percentiles
self.checksum = checksum
self.rows = rows
print "\nSummaryObj for", key, "for colName", colName, "colIndex:", colIndex
print "SummaryObj created for:", key # vars(self)
# now do the assertion checks
self.check(expectedNumRows, expectedNumCols,
expectedLabel, expectedType, expectedMissing, expectedDomain, expectedBinsSum,
noPrint=noPrint, **kwargs)
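#************************************************************************
# illustrative sketch (an addition for clarity, not part of the original
# harness): the three wrapper objects above are normally chained like this in
# a test; the expected shape and the column index are placeholders.
def _example_objects_workflow(parseResult):
    pA = ParseObj(parseResult, expectedNumRows=150, expectedNumCols=5)
    iA = InspectObj(pA.parse_key, expectedNumRows=150, expectedNumCols=5,
                    expectedMissingList=[])
    co = SummaryObj(key=pA.parse_key, colIndex=0, colName=iA.labelList[0])
    return pA, iA, co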
| apache-2.0 |
cainiaocome/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 230 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count twice as much as those of the `KNeighborsClassifier`
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
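# -----------------------------------------------------------------------------
# Illustrative sketch (an addition for clarity, not part of the original
# example).  With soft voting the ensemble probability is the weighted average
# of the individual predict_proba outputs, here (2*p_dt + 1*p_knn + 2*p_svc)/5.
# Because the SVC's Platt scaling is not seeded, the two rows printed below
# should agree closely rather than exactly.
p_manual = np.average([clf.predict_proba(X[:1]) for clf in (clf1, clf2, clf3)],
                      axis=0, weights=[2, 1, 2])
print(p_manual)
print(eclf.predict_proba(X[:1]))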
| bsd-3-clause |
winklerand/pandas | doc/sphinxext/numpydoc/plot_directive.py | 89 | 20530 | """
A special directive for generating a matplotlib plot.
.. warning::
This is a hacked version of plot_directive.py from Matplotlib.
It's very much subject to change!
Usage
-----
Can be used like this::
.. plot:: examples/example.py
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3], [4,5,6])
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
The content is interpreted as doctest formatted if it has a line starting
with ``>>>``.
The ``plot`` directive supports the options
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. Default can be changed in conf.py
and the ``image`` directive options ``alt``, ``height``, ``width``,
``scale``, ``align``, ``class``.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which plot:: file names are relative to.
(If None or empty, file names are relative to the directoly where
the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
TODO
----
* Refactor Latex output; now it's plain images, but it would be nice
to make them appear side-by-side, or in floats.
"""
from __future__ import division, absolute_import, print_function
import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback
import sphinx
if sys.version_info[0] >= 3:
from io import StringIO
else:
    from StringIO import StringIO
import warnings
warnings.warn("A plot_directive module is also available under "
"matplotlib.sphinxext; expect this numpydoc.plot_directive "
"module to be deprecated after relevant features have been "
"integrated there.",
FutureWarning, stacklevel=2)
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
app.add_config_value('plot_pre_code', '', True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_directive('plot', plot_directive, True, (0, 1, False),
**plot_directive_options)
#------------------------------------------------------------------------------
# plot:: directive
#------------------------------------------------------------------------------
from docutils.parsers.rst import directives
from docutils import nodes
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_format(arg):
return directives.choice(arg, ('python', 'lisp'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
plot_directive_options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
}
#------------------------------------------------------------------------------
# Generating output
#------------------------------------------------------------------------------
from docutils import nodes, utils
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{% endfor %}
{{ only_latex }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endfor %}
"""
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def run(arguments, content, options, state_machine, state, lineno):
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
options.setdefault('include-source', config.plot_include_source)
# determine input
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if arguments:
if not config.plot_basedir:
source_file_name = os.path.join(rst_dir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
code = open(source_file_name, 'r').read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = makefig(code, source_file_name, build_dir, output_base,
config)
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s: %s" % (output_base, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
opts = [':%s: %s' % (key, val) for key, val in list(options.items())
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
if j == 0:
src_link = source_link
else:
src_link = None
result = format_template(
TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
shutil.copyfile(fn, os.path.join(dest_dir,
os.path.basename(fn)))
# copy script (if necessary)
if source_file_name == rst_file:
target_name = os.path.join(dest_dir, output_base + source_ext)
f = open(target_name, 'w')
f.write(unescape_doctest(code))
f.close()
return errors
#------------------------------------------------------------------------------
# Run code and capture figures
#------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib import _pylab_helpers
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
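# -----------------------------------------------------------------------------
# Illustrative sketch (an addition for clarity, not part of the original
# module): what split_code_at_show produces for a two-figure snippet.
def _example_split_code_at_show():
    src = "plt.plot([1])\nplt.show()\nplt.plot([2])"
    return split_code_at_show(src)
    # -> ['plt.plot([1])\nplt.show()', 'plt.plot([2])']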
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None):
# Change the working directory to the directory of the example, so
# it can get at its data files, if any.
pwd = os.getcwd()
old_sys_path = list(sys.path)
if code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Redirect stdout
stdout = sys.stdout
sys.stdout = StringIO()
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
exec(setup.config.plot_pre_code, ns)
exec(code, ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
#------------------------------------------------------------------------------
# Generating figures
#------------------------------------------------------------------------------
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived)
or os.stat(derived).st_mtime < os.stat(original).st_mtime)
def makefig(code, code_path, output_dir, output_base, config):
"""
Run a pyplot script *code* and save the images under *output_dir*
with file names derived from *output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
formats = []
for fmt in config.plot_formats:
if isinstance(fmt, str):
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in range(1000):
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# -- We didn't find the files, so build them
results = []
ns = {}
for i, code_piece in enumerate(code_pieces):
# Clear between runs
plt.close('all')
# Run code
run_code(code_piece, code_path, ns)
# Collect images
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
                except BaseException as err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
# Results
results.append((code_piece, images))
return results
#------------------------------------------------------------------------------
# Relative pathnames
#------------------------------------------------------------------------------
try:
from os.path import relpath
except ImportError:
# Copied from Python 2.7
if 'posix' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
elif 'nt' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir, splitunc
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = splitunc(path)
unc_start, rest = splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else:
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
else:
raise RuntimeError("Unsupported platform (no relpath available!)")
| bsd-3-clause |
MonoCloud/zipline | zipline/finance/performance/position_tracker.py | 4 | 15771 | from __future__ import division
import logbook
import numpy as np
import pandas as pd
from pandas.lib import checknull
try:
# optional cython based OrderedDict
from cyordereddict import OrderedDict
except ImportError:
from collections import OrderedDict
from six import iteritems, itervalues
from zipline.protocol import Event, DATASOURCE_TYPE
from zipline.finance.slippage import Transaction
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
import zipline.protocol as zp
from zipline.assets import (
Equity, Future
)
from zipline.errors import PositionTrackerMissingAssetFinder
from . position import positiondict
log = logbook.Logger('Performance')
class PositionTracker(object):
def __init__(self, asset_finder):
self.asset_finder = asset_finder
# sid => position object
self.positions = positiondict()
# Arrays for quick calculations of positions value
self._position_amounts = OrderedDict()
self._position_last_sale_prices = OrderedDict()
self._position_value_multipliers = OrderedDict()
self._position_exposure_multipliers = OrderedDict()
self._position_payout_multipliers = OrderedDict()
self._unpaid_dividends = pd.DataFrame(
columns=zp.DIVIDEND_PAYMENT_FIELDS,
)
self._positions_store = zp.Positions()
# Dict, keyed on dates, that contains lists of close position events
# for any Assets in this tracker's positions
self._auto_close_position_sids = {}
def _update_asset(self, sid):
try:
self._position_value_multipliers[sid]
self._position_exposure_multipliers[sid]
self._position_payout_multipliers[sid]
except KeyError:
# Check if there is an AssetFinder
if self.asset_finder is None:
raise PositionTrackerMissingAssetFinder()
# Collect the value multipliers from applicable sids
asset = self.asset_finder.retrieve_asset(sid)
if isinstance(asset, Equity):
self._position_value_multipliers[sid] = 1
self._position_exposure_multipliers[sid] = 1
self._position_payout_multipliers[sid] = 0
if isinstance(asset, Future):
self._position_value_multipliers[sid] = 0
self._position_exposure_multipliers[sid] = \
asset.contract_multiplier
self._position_payout_multipliers[sid] = \
asset.contract_multiplier
# Futures auto-close timing is controlled by the Future's
# auto_close_date property
self._insert_auto_close_position_date(
dt=asset.auto_close_date,
sid=sid
)
def _insert_auto_close_position_date(self, dt, sid):
"""
        Inserts the given SID into the set of positions to be auto-closed by
the given dt.
Parameters
----------
dt : pandas.Timestamp
            The date before which the given SID will be auto-closed
sid : int
The SID of the Asset to be auto-closed
"""
if dt is not None:
self._auto_close_position_sids.setdefault(dt, set()).add(sid)
def auto_close_position_events(self, next_trading_day):
"""
Generates CLOSE_POSITION events for any SIDs whose auto-close date is
before or equal to the given date.
Parameters
----------
next_trading_day : pandas.Timestamp
            The time before which certain Assets need to be closed
Yields
------
Event
A close position event for any sids that should be closed before
the next_trading_day parameter
"""
past_asset_end_dates = set()
# Check the auto_close_position_dates dict for SIDs to close
for date, sids in self._auto_close_position_sids.items():
if date > next_trading_day:
continue
past_asset_end_dates.add(date)
for sid in sids:
# Yield a CLOSE_POSITION event
event = Event({
'dt': date,
'type': DATASOURCE_TYPE.CLOSE_POSITION,
'sid': sid,
})
yield event
# Clear out past dates
while past_asset_end_dates:
self._auto_close_position_sids.pop(past_asset_end_dates.pop())
def update_last_sale(self, event):
# NOTE, PerformanceTracker already vetted as TRADE type
sid = event.sid
if sid not in self.positions:
return 0
price = event.price
if checknull(price):
return 0
pos = self.positions[sid]
old_price = pos.last_sale_price
pos.last_sale_date = event.dt
pos.last_sale_price = price
self._position_last_sale_prices[sid] = price
# Calculate cash adjustment on assets with multipliers
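        # Worked example (hypothetical numbers): 5 future contracts with a
        # contract multiplier of 10 moving from 100.0 to 101.0 yield
        # (101.0 - 100.0) * 10 * 5 == 50.0; equities have a payout multiplier
        # of 0, so they always return 0 here.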
return ((price - old_price) * self._position_payout_multipliers[sid]
* pos.amount)
def update_positions(self, positions):
# update positions in batch
self.positions.update(positions)
for sid, pos in iteritems(positions):
self._position_amounts[sid] = pos.amount
self._position_last_sale_prices[sid] = pos.last_sale_price
self._update_asset(sid)
def update_position(self, sid, amount=None, last_sale_price=None,
last_sale_date=None, cost_basis=None):
pos = self.positions[sid]
if amount is not None:
pos.amount = amount
self._position_amounts[sid] = amount
self._position_values = None # invalidate cache
self._update_asset(sid=sid)
if last_sale_price is not None:
pos.last_sale_price = last_sale_price
self._position_last_sale_prices[sid] = last_sale_price
self._position_values = None # invalidate cache
if last_sale_date is not None:
pos.last_sale_date = last_sale_date
if cost_basis is not None:
pos.cost_basis = cost_basis
def execute_transaction(self, txn):
# Update Position
# ----------------
sid = txn.sid
position = self.positions[sid]
position.update(txn)
self._position_amounts[sid] = position.amount
self._position_last_sale_prices[sid] = position.last_sale_price
self._update_asset(sid)
def handle_commission(self, commission):
# Adjust the cost basis of the stock if we own it
if commission.sid in self.positions:
self.positions[commission.sid].\
adjust_commission_cost_basis(commission)
@property
def position_values(self):
iter_amount_price_multiplier = zip(
itervalues(self._position_amounts),
itervalues(self._position_last_sale_prices),
itervalues(self._position_value_multipliers),
)
return [
price * amount * multiplier for
price, amount, multiplier in iter_amount_price_multiplier
]
@property
def position_exposures(self):
iter_amount_price_multiplier = zip(
itervalues(self._position_amounts),
itervalues(self._position_last_sale_prices),
itervalues(self._position_exposure_multipliers),
)
return [
price * amount * multiplier for
price, amount, multiplier in iter_amount_price_multiplier
]
def calculate_positions_value(self):
if len(self.position_values) == 0:
return np.float64(0)
return sum(self.position_values)
def calculate_positions_exposure(self):
if len(self.position_exposures) == 0:
return np.float64(0)
return sum(self.position_exposures)
def _longs_count(self):
return sum(1 for i in self.position_exposures if i > 0)
def _long_exposure(self):
return sum(i for i in self.position_exposures if i > 0)
def _long_value(self):
return sum(i for i in self.position_values if i > 0)
def _shorts_count(self):
return sum(1 for i in self.position_exposures if i < 0)
def _short_exposure(self):
return sum(i for i in self.position_exposures if i < 0)
def _short_value(self):
return sum(i for i in self.position_values if i < 0)
def _gross_exposure(self):
return self._long_exposure() + abs(self._short_exposure())
def _gross_value(self):
return self._long_value() + abs(self._short_value())
def _net_exposure(self):
return self.calculate_positions_exposure()
def _net_value(self):
return self.calculate_positions_value()
def handle_split(self, split):
if split.sid in self.positions:
# Make the position object handle the split. It returns the
# leftover cash from a fractional share, if there is any.
position = self.positions[split.sid]
leftover_cash = position.handle_split(split)
self._position_amounts[split.sid] = position.amount
self._position_last_sale_prices[split.sid] = \
position.last_sale_price
self._update_asset(split.sid)
return leftover_cash
def _maybe_earn_dividend(self, dividend):
"""
Take a historical dividend record and return a Series with fields in
zipline.protocol.DIVIDEND_FIELDS (plus an 'id' field) representing
the cash/stock amount we are owed when the dividend is paid.
"""
if dividend['sid'] in self.positions:
return self.positions[dividend['sid']].earn_dividend(dividend)
else:
return zp.dividend_payment()
def earn_dividends(self, dividend_frame):
"""
Given a frame of dividends whose ex_dates are all the next trading day,
calculate and store the cash and/or stock payments to be paid on each
dividend's pay date.
"""
earned = dividend_frame.apply(self._maybe_earn_dividend, axis=1)\
.dropna(how='all')
if len(earned) > 0:
# Store the earned dividends so that they can be paid on the
# dividends' pay_dates.
self._unpaid_dividends = pd.concat(
[self._unpaid_dividends, earned],
)
def _maybe_pay_dividend(self, dividend):
"""
Take a historical dividend record, look up any stored record of
cash/stock we are owed for that dividend, and return a Series
with fields drawn from zipline.protocol.DIVIDEND_PAYMENT_FIELDS.
"""
try:
unpaid_dividend = self._unpaid_dividends.loc[dividend['id']]
return unpaid_dividend
except KeyError:
return zp.dividend_payment()
def pay_dividends(self, dividend_frame):
"""
Given a frame of dividends whose pay_dates are all the next trading
day, grant the cash and/or stock payments that were calculated on the
given dividends' ex dates.
"""
payments = dividend_frame.apply(self._maybe_pay_dividend, axis=1)\
.dropna(how='all')
# Mark these dividends as paid by dropping them from our unpaid
# table.
        self._unpaid_dividends = self._unpaid_dividends.drop(payments.index)
# Add stock for any stock dividends paid. Again, the values here may
# be negative in the case of short positions.
stock_payments = payments[payments['payment_sid'].notnull()]
for _, row in stock_payments.iterrows():
stock = row['payment_sid']
share_count = row['share_count']
# note we create a Position for stock dividend if we don't
# already own the asset
position = self.positions[stock]
position.amount += share_count
self._position_amounts[stock] = position.amount
self._position_last_sale_prices[stock] = position.last_sale_price
self._update_asset(stock)
        # Add cash equal to the net cash paid from all dividends. Note that
# "negative cash" is effectively paid if we're short an asset,
# representing the fact that we're required to reimburse the owner of
# the stock for any dividends paid while borrowing.
net_cash_payment = payments['cash_amount'].fillna(0).sum()
return net_cash_payment
def maybe_create_close_position_transaction(self, event):
if not self._position_amounts.get(event.sid):
return None
if 'price' in event:
price = event.price
else:
price = self._position_last_sale_prices[event.sid]
txn = Transaction(
sid=event.sid,
amount=(-1 * self._position_amounts[event.sid]),
dt=event.dt,
price=price,
commission=0,
order_id=0
)
return txn
def get_positions(self):
positions = self._positions_store
for sid, pos in iteritems(self.positions):
if pos.amount == 0:
# Clear out the position if it has become empty since the last
# time get_positions was called. Catching the KeyError is
# faster than checking `if sid in positions`, and this can be
# potentially called in a tight inner loop.
try:
del positions[sid]
except KeyError:
pass
continue
# Note that this will create a position if we don't currently have
# an entry
position = positions[sid]
position.amount = pos.amount
position.cost_basis = pos.cost_basis
position.last_sale_price = pos.last_sale_price
return positions
def get_positions_list(self):
positions = []
for sid, pos in iteritems(self.positions):
if pos.amount != 0:
positions.append(pos.to_dict())
return positions
def __getstate__(self):
state_dict = {}
state_dict['asset_finder'] = self.asset_finder
state_dict['positions'] = dict(self.positions)
state_dict['unpaid_dividends'] = self._unpaid_dividends
state_dict['auto_close_position_sids'] = self._auto_close_position_sids
STATE_VERSION = 3
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 3
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("PositionTracker saved state is too old.")
self.asset_finder = state['asset_finder']
self.positions = positiondict()
# note that positions_store is temporary and gets regened from
# .positions
self._positions_store = zp.Positions()
self._unpaid_dividends = state['unpaid_dividends']
self._auto_close_position_sids = state['auto_close_position_sids']
# Arrays for quick calculations of positions value
self._position_amounts = OrderedDict()
self._position_last_sale_prices = OrderedDict()
self._position_value_multipliers = OrderedDict()
self._position_exposure_multipliers = OrderedDict()
self._position_payout_multipliers = OrderedDict()
# Update positions is called without a finder
self.update_positions(state['positions'])
| apache-2.0 |
xaratustrah/iq_apps | eta.py | 1 | 1411 | #!/usr/bin/env python
"""
Plot the PSD maximum at different cooling energies.
xaratustrah oct-2014
"""
import os, sys
import numpy as np
from pylab import psd
import matplotlib.pyplot as plt
from iqtools import *
def do_it(filename):
dic1, _ = read_tiq(filename, 1, 1024, 1)
center1 = dic1['center']
fs1 = dic1['fs']
nframes_tot = dic1['nframes_tot']
naf = nacnt = np.array([])
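    # Sample every 100th frame: record the frequency bin of the PSD maximum
    # in naf and the corresponding frame number in nacnt.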
for i in range(1, nframes_tot, 100):
dic1, _ = read_tiq(filename, 1, 1024, i)
x1 = dic1['data']
Pxx1, freqs1 = psd(x1, NFFT=1024, Fs=fs1, noverlap=512)
naf = np.append(naf, freqs1[Pxx1.argmax()])
nacnt = np.append(nacnt, i)
naf = naf + center1
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(nacnt, naf, 'r.')
ax.annotate('ISO at: {} [MHz]'.format(naf.max() / 1.0e6), xy=(nacnt[naf.argmax()], naf.max()), xycoords='data',
xytext=(0.5, 0.5), textcoords='figure fraction',
arrowprops=dict(width=1, headwidth=5, edgecolor='blue', facecolor='blue', shrink=0.05))
plt.ylabel('Frequency [Hz]')
plt.xlabel('Frame Number')
plt.title('File: ' + filename.split('/')[3])
plt.grid(True)
fig.savefig(os.path.splitext(filename)[0] + '.pdf')
plt.show()
if __name__ == "__main__":
if (len(sys.argv) == 2):
do_it(sys.argv[1])
else:
print('Please provide a filename!')
| gpl-2.0 |
HaiQW/Optimal | utils/data.py | 1 | 2938 | #!/usr/bin/env python
# coding=utf-8
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
def gen_data_set(n_normals=100, n_moons=100, n_scurves=100, n_circles=100):
"""
Generate synthetic data set with manifold rare category suite for RCD and
RCE scenario.
"""
if n_normals <= 0 and n_moons <= 0 and n_scurves <= 0 and n_circles <= 0:
return False
# Generate all sub-manifold synthetic data sets. First generate moon-shaped
# data set, labels (0 or 1).
moons_data, moons_labels = datasets.make_moons(n_samples=n_moons, noise=0.05)
moons_data[:, 0] = moons_data[:, 0] * 0.5 + 5
moons_data[:, 1] = moons_data[:, 1] * 0.5 + 5
# Generate s_curve-shaped data set, labels (2).
scurve_data, scurve_labels = datasets.samples_generator.make_s_curve(n_scurves, random_state=0)
scurve_labels = np.ones(shape=(n_scurves)) * 2
scurve_data[:, 0] = scurve_data[:, 0] * 0.5 + 5
scurve_data[:, 2] = scurve_data[:, 2] * 0.5 - 5
# Generate circle_data-shaped data set, labels (3 or 4).
    circle_data, circle_labels = datasets.make_circles(n_samples=n_circles)
    circle_labels = circle_labels + 3
circle_data[:, 0] = circle_data[:, 0] * 0.5 - 5
circle_data[:, 1] = circle_data[:, 1] * 0.5 - 5
# Generate normal-shaped dataset, labels (5 or 6).
normal_data, normal_labels = datasets.make_blobs(n_samples=n_normals,
n_features=2, centers=[0, 0])
normal_labels = normal_labels + 5
normal_data = normal_data * 3
# Combine sub-manifold datasets to form the final dataset.
final_data = np.append(np.append(moons_data, scurve_data[:, [0, 2]], axis=0),
np.append(circle_data, normal_data, axis=0), axis=0)
final_labels = np.append(np.append(moons_labels, scurve_labels, axis=0),
                             np.append(circle_labels, normal_labels, axis=0), axis=0)
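    # final_data has shape (n_total, 2) and final_labels has shape (n_total,),
    # where n_total = n_moons + n_scurves + n_circles + n_normals.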
return final_data, final_labels
def plot_figure(dataset, labels, fig_name):
"""
Plot the dataset to visualize it and save the figure to eps formated file.
"""
fig = plt.figure()
plt.scatter(dataset[:, 0], dataset[:, 1], marker='o', linewidths=0, s=10, c=labels)
plt.savefig('/home/haiqw/Dropbox/PycharmProjects/SyntheticData/%s' % fig_name,
format='eps', bbox_inches='tight', dpi=1200)
plt.show(fig)
def save_data(dataset, labels, file_name):
"""
Write the synthetic dataset to file
"""
np.savetxt(file_name, np.append(dataset, np.reshape(labels, newshape=(labels.size, 1)), axis=1))
def main():
"""
Main function to test the data generator module.:
"""
data_set, labels = gen_data_set(n_normals=1000, n_moons=100, n_scurves=100, n_circles=100)
plot_figure(data_set, labels, 'name.eps')
save_data(data_set, labels, './Synthetic_data.txt')
if __name__ == "__main__":
main() | apache-2.0 |
0x0all/scikit-learn | sklearn/tests/test_grid_search.py | 4 | 27173 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import distributions
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans, SpectralClustering
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def test_parameter_grid():
"""Test basic properties of ParameterGrid."""
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
has_empty = ParameterGrid([{'C': [1, 10]}, {}])
assert_equal(len(has_empty), 3)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}])
def test_grid_search():
"""Test that the best estimator contains the right value for foo_param"""
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
"""Test search over a "grid" with only one point.
Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
"""
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]})
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
"""Test that grid search can be used for model selection only"""
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
"""Test that grid search will capture errors on data with different
length"""
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
"""Test that grid search works with both dense and sparse matrices"""
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
#np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
"""Test that grid search works when the input features are given in the
form of a precomputed kernel matrix """
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
"""Test that grid search returns an error with a non-square precomputed
training kernel matrix"""
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
"""Test that grid search returns an error when using a kernel_function"""
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
"""Regression test for bug in refitting
Simulates re-fitting a broken estimator; this used to break with
sparse SVMs.
"""
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_X_as_list():
"""Pass X as list in GridSearchCV"""
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
"""Pass y as list in GridSearchCV"""
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_bad_estimator():
# test grid-search with clustering algorithm which doesn't support
# "predict"
sc = SpectralClustering()
grid_search = GridSearchCV(sc, param_grid=dict(gamma=[.1, 1, 10]),
scoring='ari')
assert_raise_message(TypeError, "'score' or a 'predict'", grid_search.fit,
[[1]])
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": distributions.uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=distributions.expon(scale=10),
gamma=distributions.expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
"""Test that a fit search can be pickled"""
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
""" Test search with multi-output estimator"""
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters, cv=cv)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
"""Test predict_proba when disabled on estimator."""
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
""" Test GridSearchCV with Imputer """
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
"""GridSearchCV with on_error != 'raise'
Ensures that a warning is raised and score reset where appropriate.
"""
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
"""GridSearchCV with on_error == 'raise' raises the error"""
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
| bsd-3-clause |
saketkc/gencode_regions | plot_size_distribution.py | 1 | 2564 | #!/usr/bin/env python
import argparse
import os
import sys
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
matplotlib.style.use("seaborn-muted")
import pandas
import seaborn as sns
sns.set_style(style="white")
def load_df(filepath, name, cutoff):
"""Load bed file and return sites less
than 'cutoff' length"""
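    # Expects 6-column BED input, e.g. (hypothetical record):
    #   chr1    11873   12227   ENSG00000223972.5   0   +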
df = pandas.read_table(
filepath,
header=None,
names=["chrom", "start", "stop", "name", "score", "strand"],
)
df["length"] = df.stop - df.start
df["domain"] = name
df = df[df["length"] < cutoff]
return df
def plot_size_distribution(input_dir, title, outprefix):
first_coding_exon = os.path.join(input_dir, "first_exons.bed")
last_coding_exon = os.path.join(input_dir, "last_exons.bed")
utr3 = os.path.join(input_dir, "3UTRs.bed")
utr5 = os.path.join(input_dir, "5UTRs.bed")
intron = os.path.join(input_dir, "introns.bed")
cds = os.path.join(input_dir, "cds.bed")
first_coding_exon_df = load_df(first_coding_exon, "first_coding_exon", 1500)
last_coding_exon_df = load_df(last_coding_exon, "last_coding_exon", 1500)
utr3_df = load_df(utr3, "utr3", 1500)
utr5_df = load_df(utr5, "utr5", 1500)
intron_df = load_df(intron, "intron", 1500)
cds_df = load_df(cds, "cds", 1500)
fig, axs = plt.subplots(figsize=(12, 12), ncols=2, nrows=3)
data_all = [
[
(first_coding_exon_df["length"], "First coding exon"),
(last_coding_exon_df["length"], "Last coding exon"),
],
[(utr3_df["length"], "3'UTR"), (utr5_df["length"], "5'UTR")],
[(intron_df["length"], "All Introns"), (cds_df["length"], "CDS")],
]
for row in (0, 1, 2):
for col in (0, 1):
data = data_all[row][col]
print(row, col)
sns.distplot(data[0], ax=axs[row, col], kde=False, label=data[1], color="b")
axs[row, col].set_title(data[1]) # legend()
axs[row, col].set_xlabel("")
axs[0, 0].set_ylabel("Frequency")
axs[1, 0].set_ylabel("Frequency")
axs[2, 0].set_ylabel("Frequency")
fig.suptitle(title)
plt.savefig("{}.png".format(outprefix))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("inputdir", help="Input directory")
parser.add_argument("title", help="Title")
parser.add_argument("outprefix", help="Prefix of output file")
args = parser.parse_args()
plot_size_distribution(args.inputdir, args.title, args.outprefix)
| bsd-2-clause |
karenlmasters/ComputationalPhysicsUnit | GraphicsVisualisation/double_pendulum_animated.py | 1 | 2314 | # Double pendulum formula translated from the C code at
# http://www.physics.usyd.edu.au/~wheat/dpend_html/solve_dpend.c
from numpy import sin, cos, pi, array
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import matplotlib.animation as animation
G = 9.8 # acceleration due to gravity, in m/s^2
L1 = 1.0 # length of pendulum 1 in m
L2 = 1.0 # length of pendulum 2 in m
M1 = 1.0 # mass of pendulum 1 in kg
M2 = 1.0 # mass of pendulum 2 in kg
def derivs(state, t):
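    # state = [theta1, omega1, theta2, omega2] (angles in rad, angular
    # velocities in rad/s); dydx holds their time derivatives.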
dydx = np.zeros_like(state)
dydx[0] = state[1]
del_ = state[2]-state[0]
den1 = (M1+M2)*L1 - M2*L1*cos(del_)*cos(del_)
dydx[1] = (M2*L1*state[1]*state[1]*sin(del_)*cos(del_)
+ M2*G*sin(state[2])*cos(del_) + M2*L2*state[3]*state[3]*sin(del_)
- (M1+M2)*G*sin(state[0]))/den1
dydx[2] = state[3]
den2 = (L2/L1)*den1
dydx[3] = (-M2*L2*state[3]*state[3]*sin(del_)*cos(del_)
+ (M1+M2)*G*sin(state[0])*cos(del_)
- (M1+M2)*L1*state[1]*state[1]*sin(del_)
- (M1+M2)*G*sin(state[2]))/den2
return dydx
# create a time array from 0..100 sampled at 0.1 second steps
dt = 0.05
t = np.arange(0.0, 20, dt)
# th1 and th2 are the initial angles (degrees)
# w10 and w20 are the initial angular velocities (degrees per second)
th1 = 120.0
w1 = 0.0
th2 = -10.0
w2 = 0.0
rad = pi/180
# initial state
state = np.array([th1, w1, th2, w2])*pi/180.
# integrate your ODE using scipy.integrate.
y = integrate.odeint(derivs, state, t)
x1 = L1*sin(y[:,0])
y1 = -L1*cos(y[:,0])
x2 = L2*sin(y[:,2]) + x1
y2 = -L2*cos(y[:,2]) + y1
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(-2, 2), ylim=(-2, 2))
ax.grid()
line, = ax.plot([], [], 'o-', lw=2)
time_template = 'time = %.1fs'
time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
def init():
line.set_data([], [])
time_text.set_text('')
return line, time_text
def animate(i):
thisx = [0, x1[i], x2[i]]
thisy = [0, y1[i], y2[i]]
line.set_data(thisx, thisy)
time_text.set_text(time_template%(i*dt))
return line, time_text
ani = animation.FuncAnimation(fig, animate, np.arange(1, len(y)),
interval=25, blit=False, init_func=init)
#ani.save('double_pendulum.mp4', fps=15, clear_temp=True)
plt.show()
| apache-2.0 |
rstebbing/shards | refine_multiple_shards_joint.py | 1 | 3417 | ##########################################
# File: refine_multiple_shards_joint.py #
# Copyright Richard Stebbing 2014. #
# Distributed under the MIT License. #
# (See accompany file LICENSE or copy at #
# http://opensource.org/licenses/MIT) #
##########################################
# Imports
import argparse
import os
import matplotlib.pyplot as plt
import numpy as np
import visualise_progress as vis
from functools import partial
from operator import itemgetter
from solve import fit_and_colour_shards
from time import time
# Requires `rscommon`.
from rscommon.pickle_ import dump
# main
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input_all_iterations_Xy_path')
parser.add_argument('output_dir')
parser.add_argument('--visualise-progress',
action='store_true',
default=False)
parser.add_argument('--ftol', type=float, default=1e-8)
parser.add_argument('--xtol', type=float, default=1e-8)
parser.add_argument('--maxfev', type=int, default=0)
parser.add_argument('--epsilon', type=float, default=1e-6)
args = parser.parse_args()
ensure_output_path = partial(vis.ensure_path, args.output_dir)
all_iterations_Xy, orig_args = np.load(args.input_all_iterations_Xy_path)
print '<-', orig_args['input_path']
I = plt.imread(orig_args['input_path']).astype(np.float64)[..., :3]
if orig_args['base'] == 'white':
J0 = np.ones_like(I)
elif orig_args['base'] == 'black':
J0 = np.zeros_like(I)
else:
head, tail = os.path.split(orig_args['base'])
root, ext = os.path.splitext(tail)
if ext == '.dat':
J0 = np.load(orig_args['base'])
else:
J0 = plt.imread(orig_args['base']).astype(np.float64)[..., :3]
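    # all_iterations_Xy appears to hold, per shard, the sequence of (X, y)
    # iterates from the earlier per-shard fits; itemgetter(-1) takes the final
    # iterate of each as the starting point for the joint refinement.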
Xs0, ys0 = zip(*map(itemgetter(-1), all_iterations_Xy))
print 'Solving with `fit_and_colour_shards` ...'
np.seterr(over='ignore')
t0 = time()
(Xs, ys, all_Xs_ys), (exit_code, E0, E1, J, J1) = fit_and_colour_shards(
I, J0, orig_args['alpha'],
Xs0, ys0,
k=orig_args['k'],
epsilon=args.epsilon,
ftol=args.ftol,
xtol=args.xtol,
maxfev=args.maxfev,
return_info=True,
verbose=True)
t1 = time()
np.seterr(over='warn')
print 'E0:', E0
print 'E1:', E1
print 'Exit code: %d' % exit_code
print 'Time taken: %.3fs' % (t1 - t0)
output_path = ensure_output_path('all_Xs_ys.dat')
print '->', output_path
dump(output_path, (all_Xs_ys, args.__dict__), raise_on_failure=False)
output_path = ensure_output_path('J.dat')
print '->', output_path
dump(output_path, (J, args.__dict__), raise_on_failure=False)
output_path = ensure_output_path('J1.dat')
print '->', output_path
dump(output_path, (J1, args.__dict__), raise_on_failure=False)
if args.visualise_progress:
output_path = ensure_output_path('J.png')
print '->', output_path
f, ax = vis.make_image_figure(J)
vis.save_image_figure(output_path, f, J.shape)
output_path = ensure_output_path('J1.png')
print '->', output_path
f, ax = vis.make_image_figure(J1)
vis.save_image_figure(output_path, f, J1.shape)
if __name__ == '__main__':
main()
| mit |
aitoralmeida/eu-elections | data/statistic_analyzer.py | 1 | 35806 | # -*- coding: utf-8 -*-
"""
Created on Thu May 15 14:51:05 2014
@author: aitor
"""
import mysql.connector
import cache
import networkx as nx
import scipy.stats
import csv
import pandas as pd
from pandas import DataFrame
import numpy as np
import matplotlib.pyplot as plt
config = {
'user': 'elections',
'password': 'elections',
'host': 'thor.deusto.es',
'database': 'eu_test2',
}
#**************************Build social network graphs****************************
#**************************Build social network graphs****************************
#**************************Build social network graphs****************************
def get_country_relations():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
G = nx.Graph()
country_tweets = {}
for party1 in cache.parties:
try:
if cache.parties[party1]['group_id'] == 'NI - SPAIN':
continue
country1 = cache.locations[cache.parties[party1]['location']]
if not G.has_node(country1):
G.add_node(country1)
except:
continue
#
# cursor.execute("SELECT count(*) FROM tweets where user_id = '%s' " % cache.parties[party1]['user_id'])
# for result in cursor:
# total_tweets = result[0]
#
# if country_tweets.has_key(country1):
# country_tweets[country1] += total_tweets
# else:
# country_tweets[country1] = total_tweets
for party2 in cache.parties:
if cache.parties[party2]['group_id'] == 'NI - SPAIN':
continue
if party1 != party2:
try:
country2 = cache.locations[cache.parties[party2]['location']]
if not G.has_node(country2):
G.add_node(country2)
except:
continue
cursor.execute("SELECT sum(weight) FROM interactions WHERE user_id='%s' AND target_id='%s'" % (cache.parties[party1]['user_id'], cache.parties[party2]['user_id']))
weight = 0
for result in cursor:
try:
weight = int(result[0])
except:
pass
if weight != 0:
if G.has_edge(country1, country2):
G.edge[country1][country2]['weight'] += weight
else:
G.add_edge(country1, country2, weight = weight)
cursor.close()
cnx.close()
print country_tweets
nx.write_gexf(G, open('./sna/country_relations.gexf', 'w'))
return G
def get_group_relations():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
G = nx.Graph()
for party1 in cache.parties:
try:
if cache.parties[party1]['group_id'] == 'NI - SPAIN':
continue
group1 = cache.parties[party1]['group_id']
if not G.has_node(group1):
G.add_node(group1)
except:
continue
for party2 in cache.parties:
if cache.parties[party2]['group_id'] == 'NI - SPAIN':
continue
if party1 != party2:
try:
group2 = cache.parties[party2]['group_id']
if not G.has_node(group2):
G.add_node(group2)
except:
continue
cursor.execute("SELECT sum(weight) FROM interactions WHERE user_id='%s' AND target_id='%s'" % (cache.parties[party1]['user_id'], cache.parties[party2]['user_id']))
weight = 0
for result in cursor:
try:
weight = int(result[0])
except:
pass
if weight != 0:
if G.has_edge(group1, group2):
G.edge[group1][group2]['weight'] += weight
else:
G.add_edge(group1, group2, weight = weight)
cursor.close()
cnx.close()
nx.write_gexf(G, open('./sna/group_relations.gexf', 'w'))
return G
def get_party_relations():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
G = nx.Graph()
for party1 in cache.parties:
if cache.parties[party1]['group_id'] == 'NI - SPAIN':
continue
for party2 in cache.parties:
if cache.parties[party2]['group_id'] == 'NI - SPAIN':
continue
if party1 == party2:
continue
if not G.has_node(party1):
G.add_node(party1)
if not G.has_node(party2):
G.add_node(party2)
cursor.execute("SELECT sum(weight) FROM interactions WHERE user_id='%s' AND target_id='%s'" % (cache.parties[party1]['user_id'], cache.parties[party2]['user_id']))
weight = 0
for result in cursor:
try:
weight = int(result[0])
except:
pass
if weight != 0:
if G.has_edge(party1, party2):
G.edge[party1][party2]['weight'] += weight
else:
G.add_edge(party1, party2, weight = weight)
cursor.close()
cnx.close()
# print len(G.nodes())
# print len(G.edges())
nx.write_gexf(G, open('./sna/party_relations.gexf', 'w'))
return G
#******************************************SNA**************************************
#******************************************SNA**************************************
#******************************************SNA**************************************
#******************************************SNA**************************************
def get_sna(path):
sna_data = {}
print 'Building relations graph'
G = nx.read_gexf(path)
print 'Nodes:', len(G.nodes())
print 'Edges:', len(G.edges())
print 'Calculating centralities:'
print ' -degrees'
degrees = G.degree()
for c in degrees:
sna_data[c] = { 'degree':degrees[c],
'betweenness':0,
'closeness':0,
'eigenvector':0}
print ' -betweenness'
betweenness = nx.betweenness_centrality(G)
for c in betweenness:
sna_data[c]['betweenness'] = betweenness[c]
print ' -closeness'
closeness = nx.closeness_centrality(G)
for c in closeness:
sna_data[c]['closeness'] = closeness[c]
print ' -eigenvector'
eigenvector = nx.eigenvector_centrality_numpy(G)
for c in eigenvector:
sna_data[c]['eigenvector'] = eigenvector[c]
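    # sna_data maps each node to its four centrality scores, e.g.
    # (hypothetical values):
    #   sna_data['Germany'] == {'degree': 12, 'betweenness': 0.08,
    #                           'closeness': 0.71, 'eigenvector': 0.24}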
return sna_data
#def get_candidate_data():
# cnx = mysql.connector.connect(**config)
# cursor = cnx.cursor()
#
#
# candidates = ['JunckerEU', 'tsipras_eu', 'GuyVerhofstadt', 'josebove', 'SkaKeller', 'MartinSchulz']
# for candidate in candidates:
#
# print '*******************************************'
# print candidate
# c_id = ''
# query = "SELECT id from twitter_users WHERE screen_name='%s';" % (candidate)
# cursor.execute(query)
# for result in cursor:
# c_id = result[0]
# print c_id
#
# print
# print 'LANGUAGES'
# query = "SELECT lang, total FROM language_candidate WHERE candidate_id='%s' ORDER BY total DESC;" % (c_id)
# cursor.execute(query)
# for result in cursor:
# print result[0], result[1]
#
# print
# print 'MENTIONS'
# query = "SELECT eu_total, co_total FROM europe_candidate WHERE candidate_id='%s';" % (c_id)
# cursor.execute(query)
# for result in cursor:
# print 'Europe', result[0], 'Country', result[1]
#
# print
# print 'HASHTAGS'
# query = "SELECT text, SUM(total) FROM hash_candidate WHERE candidate_id='%s' GROUP BY text ORDER BY sum(total) DESC;" % (c_id)
# cursor.execute(query)
# i = 0
# for result in cursor:
# if i < 6:
# print result[0], result[1]
# i +=1
# print
# print
#
#
# cursor.close()
# cnx.close()
#******************************************ACTIVITY**************************************
#******************************************ACTIVITY**************************************
#******************************************ACTIVITY**************************************
#******************************************ACTIVITY**************************************
def get_party_activity():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
party_data = {}
for party in cache.parties:
try:
screen_name = cache.twitter_ids[cache.parties[party]['user_id']]
except:
continue
if cache.parties[party]['group_id'] == 'NI - SPAIN':
continue
cursor.execute("select count(*) FROM tweets WHERE user_id = '%s'" % cache.parties[party]['user_id'])
total = 0
for r in cursor:
total = r[0]
if party_data.has_key(screen_name):
party_data[screen_name] += total
else:
party_data[screen_name] = total
cursor.close()
cnx.close()
return party_data
def get_groups_activity():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
tweets_group = {}
parties_group = {}
for party in cache.parties:
if cache.parties[party]['group_id'] == 'NI - SPAIN':
continue
group = cache.parties[party]['group_id']
cursor.execute("select count(*) FROM tweets WHERE user_id = '%s'" % cache.parties[party]['user_id'])
total = 0
for r in cursor:
total = r[0]
if tweets_group.has_key(group):
tweets_group[group] += total
else:
tweets_group[group] = total
if parties_group.has_key(group):
parties_group[group] += 1
else:
parties_group[group] = 1
cursor.close()
cnx.close()
return tweets_group, parties_group
def get_countries_activity():
print 'Recovering countries twitter activity'
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
country_data = {}
for party in cache.parties:
if cache.parties[party]['group_id'] == 'NI - SPAIN':
continue
try:
country = cache.locations[cache.parties[party]['location']]
except:
continue
cursor.execute("select count(*) FROM tweets WHERE user_id = '%s'" % cache.parties[party]['user_id'])
activity = 0
for r in cursor:
activity = r[0]
if country_data.has_key(country):
country_data[country] += activity
else:
country_data[country] = activity
cursor.close()
cnx.close()
return country_data
#******************************************DISCOURSE**************************************
#******************************************DISCOURSE**************************************
#******************************************DISCOURSE**************************************
#******************************************DISCOURSE**************************************
def get_countries_discourse():
print 'Recovering discourse info'
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
cursor.execute('SELECT country_name, eu_total, co_total FROM europe_country')
country_data = {}
for r in cursor:
country_name = r[0]
europe = r[1]
country = r[2]
country_data[country_name] = { 'europe':europe,
'country':country}
return country_data
def get_groups_discourse():
print 'Recovering discourse info'
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
cursor.execute('SELECT group_id, eu_total, co_total FROM europe_group')
group_data = {}
for r in cursor:
group = r[0]
europe = r[1]
country = r[2]
group_data[group] = { 'europe':europe,
'country':country}
return group_data
def get_total_tweets_by_date_country():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
tweets_country_day = {}
countries = set()
day_names = set()
for i, party in enumerate(cache.parties):
# print '%i of %i' % (i, len(cache.parties))
try:
if cache.parties[party]['group_id'] == 'NI - SPAIN':
continue
country = cache.locations[cache.parties[party]['location']]
countries.add(country)
if not tweets_country_day.has_key(country):
tweets_country_day[country] = {}
except:
continue
cursor.execute("SELECT created_at, count(*) FROM tweets WHERE user_id='%s' GROUP BY created_at" % cache.parties[party]['user_id'])
for result in cursor:
day = result[0]
day_names.add(str(day))
total = result[1]
if tweets_country_day[country].has_key(str(day)):
tweets_country_day[country][str(day)] += total
else:
tweets_country_day[country][str(day)] = total
countries = list(countries)
countries.sort()
day_names = list(day_names)
day_names.sort()
days = {}
for country in countries:
for day in day_names:
if not days.has_key(day):
days[day] = []
try:
days[day].append(tweets_country_day[country][day])
except:
days[day].append(0)
# print days
# print countries
frame = DataFrame(days, index = countries)
return frame
def get_total_tweets_by_date_group():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
tweets_group_day = {}
groups = set()
day_names = set()
for i, party in enumerate(cache.parties):
# print '%i of %i' % (i, len(cache.parties))
try:
if cache.parties[party]['group_id'] == 'NI - SPAIN':
continue
group = cache.parties[party]['group_id']
groups.add(group)
if not tweets_group_day.has_key(group):
tweets_group_day[group] = {}
except:
continue
cursor.execute("SELECT created_at, count(*) FROM tweets WHERE user_id='%s' GROUP BY created_at" % cache.parties[party]['user_id'])
for result in cursor:
day = result[0]
day_names.add(str(day))
total = result[1]
if tweets_group_day[group].has_key(str(day)):
tweets_group_day[group][str(day)] += total
else:
tweets_group_day[group][str(day)] = total
groups = list(groups)
groups.sort()
day_names = list(day_names)
day_names.sort()
days = {}
for group in groups:
for day in day_names:
if not days.has_key(day):
days[day] = []
try:
days[day].append(tweets_group_day[group][day])
except:
days[day].append(0)
# print days
# print groups
frame = DataFrame(days, index = groups)
return frame
def get_countries_party_num():
country_data = {}
for party in cache.parties:
if cache.parties[party]['group_id'] == 'NI - SPAIN':
continue
try:
country = cache.locations[cache.parties[party]['location']]
if country in country_data.keys():
country_data[country] += 1
else:
country_data[country] = 1
except:
continue
return country_data
def get_num_tweets():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
cursor.execute("SELECT COUNT(*) FROM tweets")
total = 0
for r in cursor:
total = r[0]
cursor.close()
cnx.close()
return total
#******************************************STATIC DATA**************************************
#******************************************STATIC DATA**************************************
#******************************************STATIC DATA**************************************
#******************************************STATIC DATA**************************************
def load_eurobarometer():
country_data = {}
with open('./static_info/eurobarometer.csv', 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in reader:
country_data[row[1]] = { 'voice': row[2],
'future': row[3],
'economic': row[4],
'citizen': row[5]
}
return country_data
def load_turnout():
country_data = {}
with open('./static_info/turnout.csv', 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in reader:
country_data[row[1]] = {'turnout': row[2]}
return country_data
def load_mep_group():
group_data = {}
with open('./static_info/mep_group.csv', 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in reader:
group_data[row[0]] = {'meps': int(row[1])}
return group_data
def load_percen_country():
country_data = {}
with open('./static_info/results_percen_party_country.csv', 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in reader:
country = row[3]
percen = float(row[4].replace(',','.'))
if country_data.has_key(country):
country_data[country] += percen
else:
country_data[country] = percen
return country_data
def load_percen_party():
party_data = {}
with open('./static_info/results_percen_party_country.csv', 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in reader:
percen = float(row[4].replace(',','.'))
screen_name = row[2]
#twitter_id = cache.twitter_ids_rev[screen_name]
#slug = cache.parties_ids[twitter_id]
if party_data.has_key(screen_name):
party_data[screen_name] += percen
else:
party_data[screen_name] = percen
return party_data
def load_homophily():
#calculated by Juan
party_homophily = { 'PEL': 83.59,
'PES': 96.14,
'ALDE': 90.43,
'Greens/EFA': 71.35,
'EPP': 96.55,
'AECR': 65.00,
'MELD': 93.75,
}
return party_homophily
#******************************************METRICS**************************************
#******************************************METRICS**************************************
#******************************************METRICS**************************************
#******************************************METRICS**************************************
def get_party_metrics():
#only parties
#sna = get_sna('./sna/party_relations.gexf')
    #complete network, it's quite big and takes some time
#sna = get_sna('./sna/interactions-nonfiltered-5-27.gexf')
#only relevant relations
sna = get_sna('./sna/interactions-5-27.gexf')
activity_data = get_party_activity()
percen_data = load_percen_party()
total_tweets = []
degrees = []
betweenness = []
closeness = []
eigenvector = []
percen_votes = []
parties = []
for party in activity_data:
try:
degrees.append(sna[party]['degree'])
except:
degrees.append(0)
try:
betweenness.append(sna[party]['betweenness'])
except:
betweenness.append(0)
try:
closeness.append(sna[party]['closeness'])
except:
closeness.append(0)
try:
eigenvector.append(sna[party]['eigenvector'])
except:
eigenvector.append(0)
parties.append(party)
total_tweets.append(activity_data[party])
try:
percen_votes.append(percen_data[party])
except:
percen_votes.append(0)
data = { 'total_tweets' : total_tweets,
'degrees' : degrees,
'betweenness': betweenness,
'closeness': closeness,
'eigenvector': eigenvector,
'percen_votes': percen_votes
}
data_frame = DataFrame(data, index = parties)
sna_metrics = { 'degrees': degrees,
'betweenness': betweenness,
'closeness': closeness,
'eigenvector': eigenvector,
'total_tweets': total_tweets
}
results_metrics = {'percen_votes' : percen_votes}
metrics = [results_metrics, sna_metrics]
return data_frame, metrics
def get_group_metrics():
sna = get_sna('./sna/group_relations.gexf')
tweets_group, parties_group = get_groups_activity()
discourse = get_groups_discourse()
group_homophily = load_homophily()
mep_data = load_mep_group()
groups = []
total_tweets = []
total_parties = []
tweet_per_party = []
degrees = []
betweenness = []
closeness = []
eigenvector = []
homophily = []
discourse_europe_per = []
discourse_europe_uses = []
discourse_country_per = []
discourse_country_uses = []
meps = []
for group in tweets_group:
groups.append(group)
total_tweets.append(tweets_group[group])
total_parties.append(parties_group[group])
degrees.append(sna[group]['degree'])
betweenness.append(sna[group]['betweenness'])
closeness.append(sna[group]['closeness'])
eigenvector.append(sna[group]['eigenvector'])
homophily.append(group_homophily[group])
meps.append(mep_data[group]['meps'])
try:
europe_use = float(discourse[group]['europe'])
country_use = float(discourse[group]['country'])
tweets_num = tweets_group[group]
discourse_europe_per.append(europe_use/tweets_num)
discourse_europe_uses.append(europe_use/(europe_use + country_use))
discourse_country_per.append(country_use/tweets_num)
discourse_country_uses.append(country_use/(europe_use + country_use))
except:
discourse_europe_per.append(0)
discourse_europe_uses.append(0)
discourse_country_per.append(0)
discourse_country_uses.append(0)
tweet_per_party = list(np.array(total_tweets) * 1.0/np.array(total_parties))
data = { 'total_tweets' : total_tweets,
'total_parties' : total_parties,
'tweet_per_party': tweet_per_party ,
'degrees' : degrees,
'betweenness': betweenness,
'closeness': closeness,
'eigenvector': eigenvector,
'homophily': homophily,
'discourse_europe_per': discourse_europe_per,
'discourse_europe_uses': discourse_europe_uses,
'discourse_country_per': discourse_country_per,
'discourse_country_uses': discourse_country_uses,
'meps' : meps
}
data_frame = DataFrame(data, index = groups)
sna_metrics = { 'degrees': degrees,
'betweenness': betweenness,
'closeness': closeness,
'eigenvector': eigenvector,
'total_tweets': total_tweets,
'total_parties' : total_parties,
'tweet_per_party': tweet_per_party
}
homophily_metrics = {'homophily':homophily}
discourse_metrics = { 'discourse_europe_per': discourse_europe_per,
'discourse_europe_uses': discourse_europe_uses,
'discourse_country_per': discourse_country_per,
'discourse_country_uses': discourse_country_uses
}
results_metrics = {'meps' : meps}
metrics = [results_metrics, homophily_metrics, sna_metrics, discourse_metrics]
return data_frame, metrics
def get_country_metrics():
sna = get_sna('./sna/country_relations.gexf')
discourse = get_countries_discourse()
eurobarometer = load_eurobarometer()
activity = get_countries_activity()
num_parties = get_countries_party_num()
turnout_data = load_turnout()
percen_data = load_percen_country()
countries = []
country_parties = []
degrees = []
betweenness = []
closeness = []
eigenvector = []
total_tweets = []
discourse_europe_per = []
discourse_europe_uses = []
discourse_country_per = []
discourse_country_uses = []
voice_positive = []
future_europe = []
economic_union = []
be_citizen = []
turnout = []
percen_vote_captured = []
for country in sna:
countries.append(country)
country_parties.append(num_parties[country])
degrees.append(sna[country]['degree'])
betweenness.append(sna[country]['betweenness'])
closeness.append(sna[country]['closeness'])
eigenvector.append(sna[country]['eigenvector'])
total_tweets.append(activity[country])
try:
europe_use = float(discourse[country]['europe'])
country_use = float(discourse[country]['country'])
tweets_num = activity[country]
discourse_europe_per.append(europe_use/tweets_num)
discourse_europe_uses.append(europe_use/(europe_use + country_use))
discourse_country_per.append(country_use/tweets_num)
discourse_country_uses.append(country_use/(europe_use + country_use))
except:
discourse_europe_per.append(0)
discourse_europe_uses.append(0)
discourse_country_per.append(0)
discourse_country_uses.append(0)
try:
voice_positive.append(float(eurobarometer[country]['voice'])/100)
future_europe.append(float(eurobarometer[country]['future'])/100)
economic_union.append(float(eurobarometer[country]['economic'])/100)
be_citizen.append(float(eurobarometer[country]['citizen'])/100)
except:
print 'Error', country
try:
turnout.append(float(turnout_data[country]['turnout'])/100)
except:
print 'Error', country
try:
percen_vote_captured.append(percen_data[country])
except:
print 'Error', country
tweet_per_party = list(np.array(total_tweets)*1.0/np.array(country_parties))
data = { 'degrees': degrees,
'betweenness': betweenness,
'closeness': closeness,
'eigenvector': eigenvector,
'total_tweets': total_tweets,
'voice_positive': voice_positive,
'future_europe': future_europe,
'economic_union': economic_union,
'be_citizen': be_citizen,
'discourse_europe_per': discourse_europe_per,
'discourse_europe_uses': discourse_europe_uses,
'discourse_country_per': discourse_country_per,
'discourse_country_uses': discourse_country_uses,
'country_parties' : country_parties,
'tweet_per_party' : tweet_per_party,
'turnout' : turnout,
'percen_vote_captured' : percen_vote_captured
}
# print data
# print countries
data_frame = DataFrame(data, index = countries)
sna_metrics = { 'degrees': degrees,
'betweenness': betweenness,
'closeness': closeness,
'eigenvector': eigenvector,
'total_tweets': total_tweets,
'country_parties' : country_parties,
'tweet_per_party': tweet_per_party
}
eurobarometer_metrics = { 'voice_positive': voice_positive,
'future_europe': future_europe,
'economic_union': economic_union,
'be_citizen': be_citizen
}
discourse_metrics = {'discourse_europe_per': discourse_europe_per,
'discourse_europe_uses': discourse_europe_uses,
'discourse_country_per': discourse_country_per,
'discourse_country_uses': discourse_country_uses
}
turnout_metrics = {'turnout' : turnout}
result_metrics = {'percen_vote_captured' : percen_vote_captured}
metrics = [sna_metrics, discourse_metrics, turnout_metrics, eurobarometer_metrics, result_metrics]
return data_frame, metrics
def get_metrics_correlations(metrics):
for i in range(0, len(metrics)):
for j in range(i+1, len(metrics)):
metric_group_1 = metrics[i]
metric_group_2 = metrics[j]
for m1 in metric_group_1:
for m2 in metric_group_2:
print '*', m1, '-', m2
spearman = scipy.stats.spearmanr(metric_group_1[m1], metric_group_2[m2])
pearson = scipy.stats.pearsonr(metric_group_1[m1], metric_group_2[m2])
correlation = False
if spearman[0] > 0.3 or spearman[0] < -0.3:
print 'spearman:', spearman
correlation = True
if pearson[0] > 0.3 or pearson[0] < -0.3:
print 'pearson:', pearson
correlation = True
if correlation == False:
print 'No correlation'
def get_summary_statistics(frame):
print '\n-Total tweets:'
print frame.sum()['total_tweets']
print '\n-Total tweets sorted:'
print frame.sort_index(by='total_tweets', ascending=False)['total_tweets']
try:
print '\n-Avg tweets by party sorted:'
print frame.sort_index(by='tweet_per_party', ascending=False)['tweet_per_party']
except:
print 'No applicable data'
print '\n-Max:'
print frame.idxmax()
print '\n-Min :'
print frame.idxmin()
print '\n-Mean:'
print frame.mean()
print '\n-Median:'
print frame.median()
print '\n-std:'
print frame.std()
##run once before running the other methods
#get_party_relations()
#get_group_relations()
#get_country_relations()
#print 'done'
print "\nSTARTING..."
print "\n*************DATASET STATISTICS*************"
print 'Counting total tweets...'
total_tweets = get_num_tweets()
print '-Total tweets:', total_tweets
#***********************PARTIES*********************************
#***********************PARTIES*********************************
#***********************PARTIES*********************************
#***********************PARTIES*********************************
print 'Calculating party metrics...'
data_frame, metrics = get_party_metrics()
print '\n****SUMMARY STATISTICS****'
get_summary_statistics(data_frame)
print '\n****METRIC CORRELATIONS****'
get_metrics_correlations(metrics)
##***********************COUNTRIES*********************************
##***********************COUNTRIES*********************************
##***********************COUNTRIES*********************************
##***********************COUNTRIES*********************************
#
#
#print "\n\n\n*************ANALYZE COUNTRY METRICS*************"
#
#print 'Calculating country metrics...'
#data_frame, metrics = get_country_metrics()
#
##Tweets per party group by country graph
#ax = data_frame.sort_index(by='tweet_per_party', ascending=False)['tweet_per_party'].plot(kind='bar')
#ax.set_xticklabels([x.get_text() for x in ax.get_xticklabels()], fontsize=6, rotation=60)
#fig = ax.get_figure()
#fig.savefig('tweet_per_party_by_country.png')
#
#print '\n****SUMMARY STATISTICS****'
#print '\n-Captured vote:'
#print data_frame['percen_vote_captured']
#get_summary_statistics(data_frame)
#
#print '\n****METRIC CORRELATIONS****'
#get_metrics_correlations(metrics)
#
#
#print "\n\n\n*************ANALYZE TIMELINE BY COUNTRY*************"
#print 'Creating timeline...'
#t_country_day = get_total_tweets_by_date_country()
#tweet_metrics = []
#for c in t_country_day.T:
# tweet_metrics.append( {c:list(t_country_day.T[c])} )
#
#
#print '\n****COUNTRY PER DAY TWEETS CORRELATIONS****'
#get_metrics_correlations(tweet_metrics)
#
#
#
#
##***********************GROUPS*********************************
##***********************GROUPS*********************************
##***********************GROUPS*********************************
##***********************GROUPS*********************************
#
#
#print "\n\n\n*************ANALYZE GROUP METRICS*************"
#print 'Calculating group metrics...'
#frame, metrics = get_group_metrics()
#
#print '\n****SUMMARY STATISTICS****'
#get_summary_statistics(frame)
#
#print '\n****METRIC CORRELATIONS****'
#get_metrics_correlations(metrics)
#
#
#print "\n\n\n*************ANALYZE TIMELINE BY GROUP*************"
#print 'Creating timeline...'
#t_group_day = get_total_tweets_by_date_group()
#tweet_metrics = []
#for c in t_group_day.T:
# tweet_metrics.append( {c:list(t_group_day.T[c])} )
#
#
#print '\n****GROUP PER DAY TWEETS CORRELATIONS****'
#get_metrics_correlations(tweet_metrics)
#
##Graph of the timeline of the tweets per group
#ax = t_group_day.T.plot()
#ax.set_xticklabels([x.get_text() for x in ax.get_xticklabels()], fontsize=6, rotation=60)
#fig = ax.get_figure()
#fig.savefig('tweet_per_day_bygroup.png')
print '\n\n\n*****DONE*****' | apache-2.0 |
victorbergelin/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black is removed from the color list and used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
MLWave/kepler-mapper | kmapper/plotlyviz.py | 1 | 21625 | from __future__ import division
import numpy as np
from .visuals import (
init_color_function,
_size_node,
_format_projection_statistics,
_format_cluster_statistics,
_color_function,
format_meta,
_to_html_format,
_map_val2color,
graph_data_distribution,
build_histogram,
_tooltip_components,
)
try:
import igraph as ig
import plotly.graph_objs as go
import ipywidgets as ipw
import plotly.io as pio
except ImportError:
print(
"""To use the plotly visualization tools, you must have the packages python-igraph, plotly, and ipywidgets installed in your environment."""
""" It looks like at least one of these is missing. Please install again with"""
"""\n\n\t`pip install python-igraph plotly ipywidgets`\n\nand try again"""
)
raise
default_colorscale = [
[0.0, "rgb(68, 1, 84)"], # Viridis
[0.1, "rgb(72, 35, 116)"],
[0.2, "rgb(64, 67, 135)"],
[0.3, "rgb(52, 94, 141)"],
[0.4, "rgb(41, 120, 142)"],
[0.5, "rgb(32, 144, 140)"],
[0.6, "rgb(34, 167, 132)"],
[0.7, "rgb(68, 190, 112)"],
[0.8, "rgb(121, 209, 81)"],
[0.9, "rgb(189, 222, 38)"],
[1.0, "rgb(253, 231, 36)"],
]
def mpl_to_plotly(cmap, n_entries):
h = 1.0 / (n_entries - 1)
pl_colorscale = []
for k in range(n_entries):
C = list(map(np.uint8, np.array(cmap(k * h)[:3]) * 255))
pl_colorscale.append(
[round(k * h, 2), "rgb" + str((C[0], C[1], C[2]))]
) # Python 2.7+
# pl_colorscale.append([round(k*h, 2), f'rgb({C[0]}, {C[1]}, {C[2]})']) # Python 3.6+
return pl_colorscale
def plotlyviz(
scomplex,
colorscale=None,
title="Kepler Mapper",
graph_layout="kk",
color_function=None,
color_function_name=None,
dashboard=False,
graph_data=False,
factor_size=3,
edge_linewidth=1.5,
node_linecolor="rgb(200,200,200)",
width=600,
height=500,
bgcolor="rgba(240, 240, 240, 0.95)",
left=10,
bottom=35,
summary_height=300,
summary_width=600,
summary_left=20,
summary_right=20,
hist_left=25,
hist_right=25,
member_textbox_width=800,
filename=None,
):
"""
Visualizations and dashboards for kmapper graphs using Plotly. This method is suitable for use in Jupyter notebooks.
The generated FigureWidget can be updated (by performing a restyle or relayout). For example, let us add a title
to the colorbar (the name of the color function, if any),
and set the title font size. To perform these updates faster, Plotly 3.+ provides a context manager that batches up all data and layout updates:
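    A minimal sketch (assuming ``fig`` is the FigureWidget returned by this
    function, and that the color function was named e.g. "distance")::
        # data[1] is the node trace; data[0] holds the edges
        with fig.batch_update():
            fig.data[1].marker.colorbar.title = "distance"
            fig.data[1].marker.colorbar.titlefont.size = 10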
To display more info on the generated kmapper-graph, define two more FigureWidget(s):
the global node distribution figure, and a dummy figure
that displays info on the algorithms involved in getting the graph from data, as well as sklearn class instances.
    A FigureWidget has event listeners for hovering, clicking or selecting. Using the hover listener of `fw_graph`,
    the function `hovering_widgets()` builds widgets that display the hovered node's distribution, plus two
    textboxes for its cluster size and the member ids/labels of its members, as sketched below.
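    A hedged sketch of wiring such a dashboard by hand (variable names are
    illustrative; the helpers are the ones defined in this module)::
        kmgraph, mapper_summary, color_distribution = get_mapper_graph(scomplex)
        fw_graph = go.FigureWidget(data=plotly_graph(kmgraph), layout=plot_layout())
        fw_hist = node_hist_fig(color_distribution)
        fw_summary = summary_fig(mapper_summary)
        dashboard = hovering_widgets(kmgraph, fw_graph)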
Parameters
-----------
scomplex: dict
Simplicial complex is the output from the KeplerMapper `map` method.
title: str
Title of output graphic
graph_layout: igraph layout;
recommended 'kk' (kamada-kawai) or 'fr' (fruchterman-reingold)
colorscale:
Plotly colorscale(colormap) to color graph nodes
dashboard: bool, default is False
If true, display complete dashboard of node information
graph_data: bool, default is False
If true, display graph metadata
factor_size: double, default is 3
a factor for the node size
edge_linewidth : double, default is 1.5
node_linecolor: color str, default is "rgb(200,200,200)"
width: int, default is 600,
height: int, default is 500,
bgcolor: color str, default is "rgba(240, 240, 240, 0.95)",
left: int, default is 10,
bottom: int, default is 35,
summary_height: int, default is 300,
summary_width: int, default is 600,
summary_left: int, default is 20,
summary_right: int, default is 20,
hist_left: int, default is 25,
hist_right: int, default is 25,
member_textbox_width: int, default is 800,
filename: str, default is None
if filename is given, the graphic will be saved to that file.
Returns
---------
result: plotly.FigureWidget
        A FigureWidget that can be shown or edited. See the Plotly Demo notebook for examples of use.
"""
if not colorscale:
colorscale = default_colorscale
kmgraph, mapper_summary, n_color_distribution = get_mapper_graph(
scomplex,
colorscale=colorscale,
color_function=color_function,
color_function_name=color_function_name,
)
annotation = get_kmgraph_meta(mapper_summary)
plgraph_data = plotly_graph(
kmgraph,
graph_layout=graph_layout,
colorscale=colorscale,
factor_size=factor_size,
edge_linewidth=edge_linewidth,
node_linecolor=node_linecolor,
)
layout = plot_layout(
title=title,
width=width,
height=height,
annotation_text=annotation,
bgcolor=bgcolor,
left=left,
bottom=bottom,
)
result = go.FigureWidget(data=plgraph_data, layout=layout)
if color_function_name:
with result.batch_update():
result.data[1].marker.colorbar.title = color_function_name
result.data[1].marker.colorbar.titlefont.size = 10
if dashboard or graph_data:
fw_hist = node_hist_fig(n_color_distribution, left=hist_left, right=hist_right)
fw_summary = summary_fig(
mapper_summary,
width=summary_width,
height=summary_height,
left=summary_left,
right=summary_right,
)
fw_graph = result
result = hovering_widgets(
kmgraph, fw_graph, member_textbox_width=member_textbox_width
)
if graph_data:
result = ipw.VBox([fw_graph, ipw.HBox([fw_summary, fw_hist])])
if filename:
pio.write_image(result, filename)
return result
def scomplex_to_graph(
simplicial_complex,
color_function,
X,
X_names,
lens,
lens_names,
custom_tooltips,
colorscale,
):
json_dict = {"nodes": [], "links": []}
node_id_to_num = {}
for i, (node_id, member_ids) in enumerate(simplicial_complex["nodes"].items()):
node_id_to_num[node_id] = i
projection_stats, cluster_stats, member_histogram = _tooltip_components(
member_ids, X, X_names, lens, lens_names, color_function, i, colorscale
)
n = {
"id": i,
"name": node_id,
"member_ids": member_ids,
"color": _color_function(member_ids, color_function),
"size": _size_node(member_ids),
"cluster": cluster_stats,
"distribution": member_histogram,
"projection": projection_stats,
"custom_tooltips": custom_tooltips,
}
json_dict["nodes"].append(n)
for i, (node_id, linked_node_ids) in enumerate(simplicial_complex["links"].items()):
for linked_node_id in linked_node_ids:
lnk = {
"source": node_id_to_num[node_id],
"target": node_id_to_num[linked_node_id],
}
json_dict["links"].append(lnk)
return json_dict
def get_mapper_graph(
simplicial_complex,
color_function=None,
color_function_name=None,
colorscale=None,
custom_tooltips=None,
custom_meta=None,
X=None,
X_names=None,
lens=None,
lens_names=None,
):
"""Generate data for mapper graph visualization and annotation.
Parameters
----------
simplicial_complex : dict
Simplicial complex is the output from the KeplerMapper `map` method.
Returns
-------
the graph dictionary in a json representation, the mapper summary
and the node_distribution
Example
-------
>>> kmgraph, mapper_summary, n_distribution = get_mapper_graph(simplicial_complex)
"""
if not colorscale:
colorscale = default_colorscale
if not len(simplicial_complex["nodes"]) > 0:
raise Exception(
"A mapper graph should have more than 0 nodes. This might be because your clustering algorithm might be too sensitive and be classifying all points as noise."
)
color_function = init_color_function(simplicial_complex, color_function)
if X_names is None:
X_names = []
if lens_names is None:
lens_names = []
json_graph = scomplex_to_graph(
simplicial_complex,
color_function,
X,
X_names,
lens,
lens_names,
custom_tooltips,
colorscale=colorscale,
)
colorf_distribution = graph_data_distribution(
simplicial_complex, color_function, colorscale
)
mapper_summary = format_meta(
simplicial_complex,
color_function_name=color_function_name,
custom_meta=custom_meta,
)
return json_graph, mapper_summary, colorf_distribution
def plotly_graph(
kmgraph,
graph_layout="kk",
colorscale=None,
showscale=True,
factor_size=3,
edge_linecolor="rgb(180,180,180)",
edge_linewidth=1.5,
node_linecolor="rgb(255,255,255)",
node_linewidth=1.0,
):
"""Generate Plotly data structures that represent the mapper graph
Parameters
----------
kmgraph: dict representing the mapper graph,
returned by the function get_mapper_graph()
graph_layout: igraph layout; recommended 'kk' (kamada-kawai)
or 'fr' (fruchterman-reingold)
colorscale: a Plotly colorscale(colormap) to color graph nodes
showscale: boolean to display or not the colorbar
factor_size: a factor for the node size
Returns
-------
The plotly traces (dicts) representing the graph edges and nodes
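    Example
    -------
    A minimal sketch (assuming ``kmgraph`` was produced by ``get_mapper_graph``)::
        edge_trace, node_trace = plotly_graph(kmgraph)
        fig = go.FigureWidget(data=[edge_trace, node_trace],
                              layout=plot_layout())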
"""
if not colorscale:
colorscale = default_colorscale
# define an igraph.Graph instance of n_nodes
n_nodes = len(kmgraph["nodes"])
if n_nodes == 0:
raise ValueError("Your graph has 0 nodes")
G = ig.Graph(n=n_nodes)
links = [(e["source"], e["target"]) for e in kmgraph["links"]]
G.add_edges(links)
layt = G.layout(graph_layout)
hover_text = [node["name"] for node in kmgraph["nodes"]]
color_vals = [node["color"] for node in kmgraph["nodes"]]
node_size = np.array(
[factor_size * node["size"] for node in kmgraph["nodes"]], dtype=np.int
)
Xn, Yn, Xe, Ye = _get_plotly_data(links, layt)
edge_trace = dict(
type="scatter",
x=Xe,
y=Ye,
mode="lines",
line=dict(color=edge_linecolor, width=edge_linewidth),
hoverinfo="none",
)
node_trace = dict(
type="scatter",
x=Xn,
y=Yn,
mode="markers",
marker=dict(
size=node_size.tolist(),
color=color_vals,
opacity=1.0,
colorscale=colorscale,
showscale=showscale,
line=dict(color=node_linecolor, width=node_linewidth),
colorbar=dict(thickness=20, ticklen=4, x=1.01, tickfont=dict(size=10)),
),
text=hover_text,
hoverinfo="text",
)
return [edge_trace, node_trace]
def get_kmgraph_meta(mapper_summary):
""" Extract info from mapper summary to be displayed below the graph plot
"""
d = mapper_summary["custom_meta"]
meta = (
"<b>N_cubes:</b> "
+ str(d["n_cubes"])
+ " <b>Perc_overlap:</b> "
+ str(d["perc_overlap"])
)
meta += (
"<br><b>Nodes:</b> "
+ str(mapper_summary["n_nodes"])
+ " <b>Edges:</b> "
+ str(mapper_summary["n_edges"])
+ " <b>Total samples:</b> "
+ str(mapper_summary["n_total"])
+ " <b>Unique_samples:</b> "
+ str(mapper_summary["n_unique"])
)
return meta
def plot_layout(
title="TDA KMapper",
width=600,
height=600,
bgcolor="rgba(255, 255, 255, 1)",
annotation_text=None,
annotation_x=0,
annotation_y=-0.01,
top=100,
left=60,
right=60,
bottom=60,
):
"""Set the plotly layout
Parameters
----------
width, height: integers
setting width and height of plot window
bgcolor: string,
rgba or hex color code for the background color
annotation_text: string
meta data to be displayed
annotation_x & annotation_y:
        The coordinates of the point where the annotation is inserted; a negative y value places the annotation below the plot
"""
pl_layout = dict(
title=title,
font=dict(size=12),
showlegend=False,
autosize=False,
width=width,
height=height,
xaxis=dict(visible=False),
yaxis=dict(visible=False),
hovermode="closest",
plot_bgcolor=bgcolor,
margin=dict(t=top, b=bottom, l=left, r=right),
)
if annotation_text is None:
return pl_layout
else:
annotations = [
dict(
showarrow=False,
text=annotation_text,
xref="paper",
yref="paper",
x=annotation_x,
y=annotation_y,
align="left",
xanchor="left",
yanchor="top",
font=dict(size=12),
)
]
pl_layout.update(annotations=annotations)
return pl_layout
def node_hist_fig(
node_color_distribution,
title="Graph Node Distribution",
width=400,
height=300,
top=60,
left=25,
bottom=60,
right=25,
bgcolor="rgb(240,240,240)",
y_gridcolor="white",
):
"""Define the plotly plot representing the node histogram
Parameters
----------
    node_color_distribution: list of dicts describing the histogram bars (as returned by build_histogram)
width, height: integers - width and height of the histogram FigureWidget
left, top, right, bottom: ints; number of pixels around the FigureWidget
bgcolor: rgb of hex color code for the figure background color
y_gridcolor: rgb of hex color code for the yaxis y_gridcolor
Returns
-------
FigureWidget object representing the histogram of the graph nodes
"""
text = [
"{perc}%".format(**locals())
for perc in [d["perc"] for d in node_color_distribution]
]
pl_hist = go.Bar(
y=[d["height"] for d in node_color_distribution],
marker=dict(color=[d["color"] for d in node_color_distribution]),
text=text,
hoverinfo="y+text",
)
hist_layout = dict(
title=title,
width=width,
height=height,
font=dict(size=12),
xaxis=dict(showline=True, zeroline=False, showgrid=False, showticklabels=False),
yaxis=dict(showline=False, gridcolor=y_gridcolor, tickfont=dict(size=10)),
bargap=0.01,
margin=dict(l=left, r=right, b=bottom, t=top),
hovermode="x",
plot_bgcolor=bgcolor,
)
return go.FigureWidget(data=[pl_hist], layout=hist_layout)
def summary_fig(
mapper_summary,
width=600,
height=500,
top=60,
left=20,
bottom=60,
right=20,
bgcolor="rgb(240,240,240)",
):
"""Define a dummy figure that displays info on the algorithms and
sklearn class instances or methods used
Returns a FigureWidget object representing the figure
"""
text = _text_mapper_summary(mapper_summary)
data = [
dict(
type="scatter",
x=[0, width],
y=[height, 0],
mode="text",
text=[text, ""],
textposition="bottom right",
hoverinfo="none",
)
]
layout = dict(
title="Algorithms and scikit-learn objects/methods",
width=width,
height=height,
font=dict(size=12),
xaxis=dict(visible=False),
yaxis=dict(visible=False, range=[0, height + 5]),
margin=dict(t=top, b=bottom, l=left, r=right),
plot_bgcolor=bgcolor,
)
return go.FigureWidget(data=data, layout=layout)
def hovering_widgets(
kmgraph,
graph_fw,
ctooltips=False,
width=400,
height=300,
top=100,
left=50,
bgcolor="rgb(240,240,240)",
y_gridcolor="white",
member_textbox_width=200,
):
"""Defines the widgets that display the distribution of each node on hover
and the members of each nodes
Parameters
----------
kmgraph: the kepler-mapper graph dict returned by `get_mapper_graph()``
graph_fw: the FigureWidget representing the graph
    ctooltips: boolean; if True the node["custom_tooltips"] are passed to
        member_textbox, otherwise the node["member_ids"] are used
width, height, top refer to the figure
size and position of the hovered node distribution
Returns
-------
a box containing the graph figure, the figure of the hovered node
distribution, and the textboxes displaying the cluster size and member_ids
or custom tooltips for hovered node members
"""
fnode = kmgraph["nodes"][0]
fwc = node_hist_fig(
fnode["distribution"],
title="Cluster Member Distribution",
width=width,
height=height,
top=top,
left=left,
bgcolor=bgcolor,
y_gridcolor=y_gridcolor,
)
clust_textbox = ipw.Text(
value="{:d}".format(fnode["cluster"]["size"]),
description="Cluster size:",
disabled=False,
continuous_update=True,
)
clust_textbox.layout = dict(margin="10px 10px 10px 10px", width="200px")
member_textbox = ipw.Textarea(
value=", ".join(str(x) for x in fnode["member_ids"])
if not ctooltips
else ", ".join(str(x) for x in fnode["custom_tooltips"]),
description="Members:",
disabled=False,
continuous_update=True,
)
member_textbox.layout = dict(
margin="5px 5px 5px 10px", width=str(member_textbox_width) + "px"
)
def do_on_hover(trace, points, state):
if not points.point_inds:
return
ind = points.point_inds[0] # get the index of the hovered node
node = kmgraph["nodes"][ind]
# on hover do:
        with fwc.batch_update():  # update data in the cluster member histogram
fwc.data[0].text = [
"{:.1f}%".format(d["perc"]) for d in node["distribution"]
]
fwc.data[0].y = [d["height"] for d in node["distribution"]]
fwc.data[0].marker.color = [d["color"] for d in node["distribution"]]
clust_textbox.value = "{:d}".format(node["cluster"]["size"])
member_textbox.value = (
", ".join(str(x) for x in node["member_ids"])
if not ctooltips
else ", ".join(str(x) for x in node["custom_tooltips"])
)
trace = graph_fw.data[1]
trace.on_hover(do_on_hover)
return ipw.VBox([ipw.HBox([graph_fw, fwc]), clust_textbox, member_textbox])
def _get_plotly_data(E, coords):
# E : the list of tuples representing the graph edges
# coords: list of node coordinates assigned by igraph.Layout
N = len(coords)
Xnodes = [coords[k][0] for k in range(N)] # x-coordinates of nodes
Ynodes = [coords[k][1] for k in range(N)] # y-coordnates of nodes
Xedges = []
Yedges = []
for e in E:
Xedges.extend([coords[e[0]][0], coords[e[1]][0], None])
Yedges.extend([coords[e[0]][1], coords[e[1]][1], None])
return Xnodes, Ynodes, Xedges, Yedges
def _text_mapper_summary(mapper_summary):
d = mapper_summary["custom_meta"]
text = "<br><b>Projection: </b>" + d["projection"]
text += (
"<br><b>Clusterer: </b>" + d["clusterer"] + "<br><b>Scaler: </b>" + d["scaler"]
)
if "color_function" in d.keys():
text += "<br><b>Color function: </b>" + d["color_function"]
return text
def _hover_format(member_ids, custom_tooltips, X, X_names, lens, lens_names):
cluster_data = _format_cluster_statistics(member_ids, X, X_names)
tooltip = ""
custom_tooltips = (
custom_tooltips[member_ids] if custom_tooltips is not None else member_ids
)
val_size = cluster_data["size"]
tooltip += "{val_size}".format(**locals())
return tooltip
| mit |
mgeplf/NeuroM | apps/__main__.py | 3 | 1057 | '''The morph-tool command line launcher'''
import logging
import click
from neurom import load_neuron
logging.basicConfig()
logger = logging.getLogger('morph_tool')
logger.setLevel(logging.INFO)
@click.group()
def cli():
'''The CLI entry point'''
@cli.command()
@click.argument('input_file')
@click.option('--plane', type=click.Choice(['3d', 'xy', 'yx', 'yz', 'zy', 'xz', 'zx']),
default='3d')
@click.option('--backend', type=click.Choice(['plotly', 'matplotlib']),
default='matplotlib')
def view(input_file, plane, backend):
'''A simple neuron viewer'''
if backend == 'matplotlib':
from neurom.viewer import draw
kwargs = {
'mode': '3d' if plane == '3d' else '2d',
}
if plane != '3d':
kwargs['plane'] = plane
draw(load_neuron(input_file), **kwargs)
else:
from neurom.view.plotly import draw
draw(load_neuron(input_file), plane=plane)
if backend == 'matplotlib':
import matplotlib.pyplot as plt
plt.show()
| bsd-3-clause |
liyu1990/sklearn | examples/linear_model/plot_ridge_path.py | 254 | 1655 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
arjoly/scikit-learn | sklearn/linear_model/least_angle.py | 4 | 54501 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..exceptions import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ..externals.six import string_types
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False, positive=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
positive : boolean (default=False)
Restrict coefficients to be >= 0.
When using this option together with method 'lasso' the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha (neither will they when using method 'lar'
..). Only coeffiencts up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent lasso_path function.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
        regularization parameter alpha in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<http://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
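    Examples
    --------
    A minimal sketch on synthetic data (shapes and values are illustrative)::
        import numpy as np
        from sklearn.linear_model import lars_path
        rng = np.random.RandomState(0)
        X = rng.randn(40, 8)
        y = X[:, 0] - 2 * X[:, 3] + 0.01 * rng.randn(40)
        # coefs has shape (n_features, n_alphas + 1); column j holds the
        # coefficients at regularization value alphas[j]
        alphas, active, coefs = lars_path(X, y, method='lasso')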
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
# even if they are in the upper part that it not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif isinstance(Gram, string_types) and Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
if positive:
sign_active[n_active] = np.ones_like(C_)
else:
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap does only work inplace if matrix is fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by the
# test suite. The `equality_tolerance` margin added in 0.16.0 to
# get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error that is greater than
            # the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and the
            # equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
if positive:
gamma_ = min(g1, C / AA)
else:
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not length of 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
# handle the case when idx is not length of 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
                        # keep `indices` consistent with the column swap above
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.positive = positive
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
        Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
        Returns
        -------
        self : object
            Returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True, positive=self.positive)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True,
positive=self.positive)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_mean, y_mean, X_std)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, positive=False,
precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
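# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition): the docstring above notes that
# LassoLars optimizes the same objective as the coordinate-descent Lasso; the
# comparison below makes that concrete. The data and the alpha value are
# assumptions for illustration only.
def _demo_lassolars_vs_lasso():
    import numpy as np
    from sklearn.linear_model import Lasso
    rng = np.random.RandomState(0)
    X = rng.randn(60, 4)
    y = X[:, 0] - X[:, 2] + 0.05 * rng.randn(60)
    # normalize=False so both estimators see exactly the same design matrix.
    lars_est = LassoLars(alpha=0.1, normalize=False).fit(X, y)
    cd_est = Lasso(alpha=0.1).fit(X, y)
    # The two coefficient vectors should agree up to solver tolerance.
    return lars_est.coef_, cd_est.coef_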
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps, positive=False):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
positive=positive)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
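# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition): turning the residues returned by
# _lars_path_residues into a per-alpha mean squared error, which is essentially
# what LarsCV.fit does for each cross-validation fold below. The data and the
# train/test split are made up for illustration.
def _demo_residues_to_mse():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(80, 6)
    y = X[:, 0] + 0.5 * X[:, 3] + 0.1 * rng.randn(80)
    alphas, active, coefs, residues = _lars_path_residues(
        X[:40], y[:40], X[40:], y[40:], method='lasso')
    # residues has shape (n_alphas, n_test_samples): averaging the squared
    # residues over the test samples scores each alpha of the path.
    mse_per_alpha = np.mean(residues ** 2, axis=-1)
    return alphas, mse_per_alpha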
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
    max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True, positive=False):
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X = as_float_array(X, copy=self.copy_X)
y = as_float_array(y, copy=self.copy_X)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps, positive=self.positive)
for train, test in cv)
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
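# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition): minimal LarsCV usage, since the
# class docstring above has no Examples section. The data and the number of
# folds are assumptions for illustration only.
def _demo_larscv():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(100, 8)
    y = X[:, 1] - 3.0 * X[:, 5] + 0.1 * rng.randn(100)
    model = LarsCV(cv=5).fit(X, y)
    # alpha_ is the regularization strength selected on the left-out folds,
    # cv_mse_path_ holds the errors that were used to select it.
    return model.alpha_, model.coef_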
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsCV only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsIC only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, positive=False):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
training data.
y : array-like, shape (n_samples,)
target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True, positive=self.positive)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T * Xc) * Xc.T), i.e. the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
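# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition): the criterion computed in fit()
# above is, for each alpha on the path,
#     criterion(alpha) = n_samples * log(MSE(alpha)) + K * df(alpha)
# with K = 2 for AIC, K = log(n_samples) for BIC, and df(alpha) the number of
# non-zero coefficients. The data below is made up for illustration only.
def _demo_lassolarsic():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(120, 10)
    y = X[:, 0] - 2.0 * X[:, 4] + 0.1 * rng.randn(120)
    model = LassoLarsIC(criterion='bic').fit(X, y)
    # alpha_ minimizes criterion_; coef_ is the corresponding solution.
    return model.alpha_, model.criterion_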
| bsd-3-clause |
UCL-CS35/incdb-poc | venv/share/doc/dipy/examples/tracking_tissue_classifier.py | 3 | 11523 | """
=================================================
Using Various Tissue Classifiers for Tractography
=================================================
The tissue classifier determines if the tracking stops or continues at each
tracking position. The tracking stops when it reaches an ending region
(e.g. low FA, gray matter or cerebrospinal fluid regions) or exits the image
boundaries. The tracking also stops if the direction getter has no direction
to follow.
Each tissue classifier determines if the stopping is 'valid' or
'invalid'. A streamline is 'valid' when the tissue classifier determines that
the streamline stops in a position classified as 'ENDPOINT' or 'OUTSIDEIMAGE'.
A streamline is 'invalid' when it stops in a position classified as
'TRACKPOINT' or 'INVALIDPOINT'. These conditions are described below. The
'LocalTracking' generator can be set to output all generated streamlines
or only the 'valid' ones.
This example is an extension of the
:ref:`example_deterministic_fiber_tracking` example. We begin by loading the
data, creating a seeding mask from white matter voxels of the corpus callosum,
fitting a Constrained Spherical Deconvolution (CSD) reconstruction
model and creating the maximum deterministic direction getter.
"""
import numpy as np
from dipy.data import (read_stanford_labels,
default_sphere,
read_stanford_pve_maps)
from dipy.direction import DeterministicMaximumDirectionGetter
from dipy.io.trackvis import save_trk
from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel,
auto_response)
from dipy.tracking.local import LocalTracking
from dipy.tracking import utils
from dipy.viz import fvtk
from dipy.viz.colormap import line_colors
ren = fvtk.ren()
hardi_img, gtab, labels_img = read_stanford_labels()
_, _, img_pve_wm = read_stanford_pve_maps()
data = hardi_img.get_data()
labels = labels_img.get_data()
affine = hardi_img.get_affine()
white_matter = img_pve_wm.get_data()
seed_mask = np.logical_and(labels == 2, white_matter == 1)
seeds = utils.seeds_from_mask(seed_mask, density=2, affine=affine)
response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
csd_model = ConstrainedSphericalDeconvModel(gtab, response)
csd_fit = csd_model.fit(data, mask=white_matter)
dg = DeterministicMaximumDirectionGetter.from_shcoeff(csd_fit.shm_coeff,
max_angle=30.,
sphere=default_sphere)
"""
Threshold Tissue Classifier
---------------------------
A scalar map can be used to define where the tracking stops. The threshold
tissue classifier uses a scalar map to stop the tracking whenever the
interpolated scalar value is lower than a fixed threshold. Here, we show
an example using the fractional anisotropy (FA) map of the DTI model.
The threshold tissue classifier uses a trilinear interpolation at the
tracking position.
**Parameters**
- metric_map: numpy array [:, :, :]
- threshold: float
**Stopping criterion**
- 'ENDPOINT': metric_map < threshold,
- 'OUTSIDEIMAGE': tracking point outside of metric_map,
- 'TRACKPOINT': stop because no direction is available,
- 'INVALIDPOINT': N/A.
"""
import matplotlib.pyplot as plt
import dipy.reconst.dti as dti
from dipy.reconst.dti import fractional_anisotropy
from dipy.tracking.local import ThresholdTissueClassifier
tensor_model = dti.TensorModel(gtab)
tenfit = tensor_model.fit(data, mask=labels > 0)
FA = fractional_anisotropy(tenfit.evals)
threshold_classifier = ThresholdTissueClassifier(FA, .2)
fig = plt.figure()
mask_fa = FA.copy()
mask_fa[mask_fa < 0.2] = 0
plt.xticks([])
plt.yticks([])
plt.imshow(mask_fa[:, :, data.shape[2] // 2].T, cmap='gray', origin='lower',
interpolation='nearest')
fig.tight_layout()
fig.savefig('threshold_fa.png')
"""
.. figure:: threshold_fa.png
:align: center
**Thresholded fractional anisotropy map.**
"""
all_streamlines_threshold_classifier = LocalTracking(dg,
threshold_classifier,
seeds,
affine,
step_size=.5,
return_all=True)
save_trk("deterministic_threshold_classifier_all.trk",
all_streamlines_threshold_classifier,
affine,
labels.shape)
streamlines = [sl for sl in all_streamlines_threshold_classifier]
fvtk.clear(ren)
fvtk.add(ren, fvtk.line(streamlines, line_colors(streamlines)))
fvtk.record(ren, out_path='all_streamlines_threshold_classifier.png',
size=(600, 600))
"""
.. figure:: all_streamlines_threshold_classifier.png
:align: center
**Deterministic tractography using a thresholded fractional anisotropy.**
"""
"""
Binary Tissue Classifier
------------------------
A binary mask can be used to define where the tracking stops. The binary
tissue classifier stops the tracking whenever the tracking position is outside
the mask. Here, we show how to obtain the binary tissue classifier from
the white matter mask defined above. The binary tissue classifier uses a
nearest-neighbor interpolation at the tracking position.
**Parameters**
- mask: numpy array [:, :, :]
**Stopping criterion**
- 'ENDPOINT': mask = 0
- 'OUTSIDEIMAGE': tracking point outside of mask
- 'TRACKPOINT': no direction is available
- 'INVALIDPOINT': N/A
"""
from dipy.tracking.local import BinaryTissueClassifier
binary_classifier = BinaryTissueClassifier(white_matter == 1)
fig = plt.figure()
plt.xticks([])
plt.yticks([])
fig.tight_layout()
plt.imshow(white_matter[:, :, data.shape[2] // 2].T, cmap='gray', origin='lower',
interpolation='nearest')
fig.savefig('white_matter_mask.png')
"""
.. figure:: white_matter_mask.png
:align: center
**White matter binary mask.**
"""
all_streamlines_binary_classifier = LocalTracking(dg,
binary_classifier,
seeds,
affine,
step_size=.5,
return_all=True)
save_trk("deterministic_binary_classifier_all.trk",
all_streamlines_binary_classifier,
affine,
labels.shape)
streamlines = [sl for sl in all_streamlines_binary_classifier]
fvtk.clear(ren)
fvtk.add(ren, fvtk.line(streamlines, line_colors(streamlines)))
fvtk.record(ren, out_path='all_streamlines_binary_classifier.png',
size=(600, 600))
"""
.. figure:: all_streamlines_binary_classifier.png
:align: center
**Deterministic tractography using a binary white matter mask.**
"""
"""
ACT Tissue Classifier
---------------------
Anatomically-constrained tractography (ACT) [Smith2012]_ uses information from
anatomical images to determine when the tractography stops. The 'include_map'
defines when the streamline reached a 'valid' stopping region (e.g. gray
matter partial volume estimation (PVE) map) and the 'exclude_map' defines when
the streamline reached an 'invalid' stopping region (e.g. corticospinal fluid
PVE map). The background of the anatomical image should be added to the
'include_map' to keep streamlines exiting the brain (e.g. through the
brain stem). The ACT tissue classifier uses a trilinear interpolation
at the tracking position.
**Parameters**
- include_map: numpy array [:, :, :],
- exclude_map: numpy array [:, :, :],
**Stopping criterion**
- 'ENDPOINT': include_map > 0.5,
- 'OUTSIDEIMAGE': tracking point outside of include_map or exclude_map,
- 'TRACKPOINT': no direction is available,
- 'INVALIDPOINT': exclude_map > 0.5.
"""
from dipy.tracking.local import ActTissueClassifier
img_pve_csf, img_pve_gm, img_pve_wm = read_stanford_pve_maps()
background = np.ones(img_pve_gm.shape)
background[(img_pve_gm.get_data() +
img_pve_wm.get_data() +
img_pve_csf.get_data()) > 0] = 0
include_map = img_pve_gm.get_data()
include_map[background > 0] = 1
exclude_map = img_pve_csf.get_data()
act_classifier = ActTissueClassifier(include_map, exclude_map)
fig = plt.figure()
plt.subplot(121)
plt.xticks([])
plt.yticks([])
plt.imshow(include_map[:, :, data.shape[2] // 2].T, cmap='gray', origin='lower',
interpolation='nearest')
plt.subplot(122)
plt.xticks([])
plt.yticks([])
plt.imshow(exclude_map[:, :, data.shape[2] // 2].T, cmap='gray', origin='lower',
interpolation='nearest')
fig.tight_layout()
fig.savefig('act_maps.png')
"""
.. figure:: act_maps.png
:align: center
**Include (left) and exclude (right) maps for ACT.**
"""
all_streamlines_act_classifier = LocalTracking(dg,
act_classifier,
seeds,
affine,
step_size=.5,
return_all=True)
save_trk("deterministic_act_classifier_all.trk",
all_streamlines_act_classifier,
affine,
labels.shape)
streamlines = [sl for sl in all_streamlines_act_classifier]
fvtk.clear(ren)
fvtk.add(ren, fvtk.line(streamlines, line_colors(streamlines)))
fvtk.record(ren, out_path='all_streamlines_act_classifier.png',
size=(600, 600))
"""
.. figure:: all_streamlines_act_classifier.png
:align: center
**Deterministic tractography using ACT stopping criterion.**
"""
valid_streamlines_act_classifier = LocalTracking(dg,
act_classifier,
seeds,
affine,
step_size=.5,
return_all=False)
save_trk("deterministic_act_classifier_valid.trk",
valid_streamlines_act_classifier,
affine,
labels.shape)
streamlines = [sl for sl in valid_streamlines_act_classifier]
fvtk.clear(ren)
fvtk.add(ren, fvtk.line(streamlines, line_colors(streamlines)))
fvtk.record(ren, out_path='valid_streamlines_act_classifier.png',
size=(600, 600))
"""
.. figure:: valid_streamlines_act_classifier.png
:align: center
**Deterministic tractography using an anatomically-constrained tractography
stopping criterion. Streamlines ending in gray matter region only.**
"""
"""
The threshold and binary tissue classifiers use a scalar map and a binary mask,
respectively, to stop the tracking. The ACT tissue classifier uses partial volume
estimation (PVE) maps from an anatomical image to stop the tracking. Additionally,
the ACT tissue classifier determines if the tracking stopped in expected regions
(e.g. gray matter) and allows the user to get only streamlines stopping in those
regions.
Notes
------
Currently in ACT the proposed method that cuts streamlines going through
subcortical gray matter regions is not implemented. The backtracking technique
for streamlines reaching INVALIDPOINT is not implemented either.
References
----------
.. [Smith2012] Smith, R. E., Tournier, J.-D., Calamante, F., & Connelly, A.
Anatomically-constrained tractography: Improved diffusion MRI
streamlines tractography through effective use of anatomical
information. NeuroImage, 63(3), 1924-1938, 2012.
"""
| bsd-2-clause |
fmgvalente/agent | modules/svm.py | 1 | 1165 | from sklearn import svm
from sklearn.externals import joblib
# Since this is a very lightweight step we perform its side effects here;
# don't copy this pattern, it just saves launching a separate process.
def create_execution_script(**options):
tmp_options = options #because refs...
del tmp_options['input']
workdir = tmp_options['workdir']
del tmp_options['workdir']
channel = tmp_options['channel']
del tmp_options['channel']
#default values for parameters
if 'C' not in tmp_options: #regularization
tmp_options['C'] = 1
if 'kernel' not in tmp_options:
tmp_options['kernel'] = "rbf"
if 'gamma' not in tmp_options:
tmp_options['gamma'] = 3
if 'cache_size' not in tmp_options:
tmp_options['cache_size'] = 1000
if 'probability' not in tmp_options:
tmp_options['probability'] = False
if 'class_weight' not in tmp_options:
tmp_options['class_weight'] = 'auto'
print("option:"+repr(options))
classifier = svm.SVC(**tmp_options)
joblib.dump(classifier, workdir+"/svm.pkl")
script_string = ""
return script_string
def data(workdir, channel, **options):
classifier = joblib.load(workdir+"/svm.pkl")
return { 'datatype' : 'model', 'model' : classifier}
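# Illustrative sketch (editorial addition): how the two helpers above are meant
# to be chained. The 'input', 'workdir' and 'channel' values are made-up
# assumptions; create_execution_script persists the configured (unfitted) SVC
# to <workdir>/svm.pkl and data() loads it back.
def _demo_svm_module(tmpdir='/tmp'):
    create_execution_script(input=None, workdir=tmpdir, channel=0,
                            C=1.0, kernel='rbf', gamma=3)
    wrapped = data(tmpdir, 0)
    return wrapped['model']  # an sklearn.svm.SVC instance, still unfitted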
| gpl-3.0 |
thanatoskira/AndroGuard | elsim/elsim/elsim.py | 9 | 16425 | # This file is part of Elsim
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Elsim is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Elsim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Elsim. If not, see <http://www.gnu.org/licenses/>.
import logging
ELSIM_VERSION = 0.2
log_elsim = logging.getLogger("elsim")
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
log_elsim.addHandler(console_handler)
log_runtime = logging.getLogger("elsim.runtime") # logs at runtime
log_interactive = logging.getLogger("elsim.interactive") # logs in interactive functions
log_loading = logging.getLogger("elsim.loading") # logs when loading
def set_debug() :
log_elsim.setLevel( logging.DEBUG )
def get_debug() :
return log_elsim.getEffectiveLevel() == logging.DEBUG
def warning(x):
log_runtime.warning(x)
def error(x) :
log_runtime.error(x)
    raise Exception(x)
def debug(x) :
log_runtime.debug(x)
from similarity.similarity import *
FILTER_ELEMENT_METH = "FILTER_ELEMENT_METH"
FILTER_CHECKSUM_METH = "FILTER_CHECKSUM_METH" # function to checksum an element
FILTER_SIM_METH = "FILTER_SIM_METH" # function to calculate the similarity between two elements
FILTER_SORT_METH = "FILTER_SORT_METH" # function to sort all similar elements
FILTER_SORT_VALUE = "FILTER_SORT_VALUE" # value which used in the sort method to eliminate not interesting comparisons
FILTER_SKIPPED_METH = "FILTER_SKIPPED_METH" # object to skip elements
FILTER_SIM_VALUE_METH = "FILTER_SIM_VALUE_METH" # function to modify values of the similarity
BASE = "base"
ELEMENTS = "elements"
HASHSUM = "hashsum"
SIMILAR_ELEMENTS = "similar_elements"
HASHSUM_SIMILAR_ELEMENTS = "hash_similar_elements"
NEW_ELEMENTS = "newelements"
HASHSUM_NEW_ELEMENTS = "hash_new_elements"
DELETED_ELEMENTS = "deletedelements"
IDENTICAL_ELEMENTS = "identicalelements"
INTERNAL_IDENTICAL_ELEMENTS = "internal identical elements"
SKIPPED_ELEMENTS = "skippedelements"
SIMILARITY_ELEMENTS = "similarity_elements"
SIMILARITY_SORT_ELEMENTS = "similarity_sort_elements"
class ElsimNeighbors :
def __init__(self, x, ys) :
import numpy as np
from sklearn.neighbors import NearestNeighbors
#print x, ys
CI = np.array( [x.checksum.get_signature_entropy(), x.checksum.get_entropy()] )
#print CI, x.get_info()
#print
for i in ys :
CI = np.vstack( (CI, [i.checksum.get_signature_entropy(), i.checksum.get_entropy()]) )
#idx = 0
#for i in np.array(CI)[1:] :
# print idx+1, i, ys[idx].get_info()
# idx += 1
self.neigh = NearestNeighbors(2, 0.4)
self.neigh.fit(np.array(CI))
#print self.neigh.kneighbors( CI[0], len(CI) )
self.CI = CI
self.ys = ys
def cmp_elements(self) :
z = self.neigh.kneighbors( self.CI[0], 5 )
l = []
cmp_values = z[0][0]
cmp_elements = z[1][0]
idx = 1
for i in cmp_elements[1:] :
#if cmp_values[idx] > 1.0 :
# break
#print i, cmp_values[idx], self.ys[ i - 1 ].get_info()
l.append( self.ys[ i - 1 ] )
idx += 1
return l
def split_elements(el, els) :
e1 = {}
for i in els :
e1[ i ] = el.get_associated_element( i )
return e1
####
# elements : entropy raw, hash, signature
#
# set elements : hash
# hash table elements : hash --> element
class Elsim :
def __init__(self, e1, e2, F, T=None, C=None, libnative=True, libpath="elsim/elsim/similarity/libsimilarity/libsimilarity.so") :
self.e1 = e1
self.e2 = e2
self.F = F
self.compressor = SNAPPY_COMPRESS
set_debug()
if T != None :
self.F[ FILTER_SORT_VALUE ] = T
if isinstance(libnative, str) :
libpath = libnative
libnative = True
self.sim = SIMILARITY( libpath, libnative )
if C != None :
if C in H_COMPRESSOR :
self.compressor = H_COMPRESSOR[ C ]
self.sim.set_compress_type( self.compressor )
else :
self.sim.set_compress_type( self.compressor )
self.filters = {}
self._init_filters()
self._init_index_elements()
self._init_similarity()
self._init_sort_elements()
self._init_new_elements()
def _init_filters(self) :
self.filters = {}
self.filters[ BASE ] = {}
self.filters[ BASE ].update( self.F )
self.filters[ ELEMENTS ] = {}
self.filters[ HASHSUM ] = {}
self.filters[ IDENTICAL_ELEMENTS ] = set()
self.filters[ SIMILAR_ELEMENTS ] = []
self.filters[ HASHSUM_SIMILAR_ELEMENTS ] = []
self.filters[ NEW_ELEMENTS ] = set()
self.filters[ HASHSUM_NEW_ELEMENTS ] = []
self.filters[ DELETED_ELEMENTS ] = []
self.filters[ SKIPPED_ELEMENTS ] = []
self.filters[ ELEMENTS ][ self.e1 ] = []
self.filters[ HASHSUM ][ self.e1 ] = []
self.filters[ ELEMENTS ][ self.e2 ] = []
self.filters[ HASHSUM ][ self.e2 ] = []
self.filters[ SIMILARITY_ELEMENTS ] = {}
self.filters[ SIMILARITY_SORT_ELEMENTS ] = {}
self.set_els = {}
self.ref_set_els = {}
self.ref_set_ident = {}
def _init_index_elements(self) :
self.__init_index_elements( self.e1, 1 )
self.__init_index_elements( self.e2 )
def __init_index_elements(self, ce, init=0) :
self.set_els[ ce ] = set()
self.ref_set_els[ ce ] = {}
self.ref_set_ident[ce] = {}
for ae in ce.get_elements() :
e = self.filters[BASE][FILTER_ELEMENT_METH]( ae, ce )
if self.filters[BASE][FILTER_SKIPPED_METH].skip( e ) :
self.filters[ SKIPPED_ELEMENTS ].append( e )
continue
self.filters[ ELEMENTS ][ ce ].append( e )
fm = self.filters[ BASE ][ FILTER_CHECKSUM_METH ]( e, self.sim )
e.set_checksum( fm )
sha256 = e.getsha256()
self.filters[ HASHSUM ][ ce ].append( sha256 )
if sha256 not in self.set_els[ ce ] :
self.set_els[ ce ].add( sha256 )
self.ref_set_els[ ce ][ sha256 ] = e
self.ref_set_ident[ce][sha256] = []
self.ref_set_ident[ce][sha256].append(e)
def _init_similarity(self) :
intersection_elements = self.set_els[ self.e2 ].intersection( self.set_els[ self.e1 ] )
difference_elements = self.set_els[ self.e2 ].difference( intersection_elements )
self.filters[IDENTICAL_ELEMENTS].update([ self.ref_set_els[ self.e1 ][ i ] for i in intersection_elements ])
available_e2_elements = [ self.ref_set_els[ self.e2 ][ i ] for i in difference_elements ]
# Check if some elements in the first file has been modified
for j in self.filters[ELEMENTS][self.e1] :
self.filters[ SIMILARITY_ELEMENTS ][ j ] = {}
#debug("SIM FOR %s" % (j.get_info()))
if j.getsha256() not in self.filters[HASHSUM][self.e2] :
#eln = ElsimNeighbors( j, available_e2_elements )
#for k in eln.cmp_elements() :
for k in available_e2_elements :
#debug("%s" % k.get_info())
self.filters[SIMILARITY_ELEMENTS][ j ][ k ] = self.filters[BASE][FILTER_SIM_METH]( self.sim, j, k )
if j.getsha256() not in self.filters[HASHSUM_SIMILAR_ELEMENTS] :
self.filters[SIMILAR_ELEMENTS].append(j)
self.filters[HASHSUM_SIMILAR_ELEMENTS].append( j.getsha256() )
def _init_sort_elements(self) :
deleted_elements = []
for j in self.filters[SIMILAR_ELEMENTS] :
#debug("SORT FOR %s" % (j.get_info()))
sort_h = self.filters[BASE][FILTER_SORT_METH]( j, self.filters[SIMILARITY_ELEMENTS][ j ], self.filters[BASE][FILTER_SORT_VALUE] )
self.filters[SIMILARITY_SORT_ELEMENTS][ j ] = set( i[0] for i in sort_h )
ret = True
if sort_h == [] :
ret = False
if ret == False :
deleted_elements.append( j )
for j in deleted_elements :
self.filters[ DELETED_ELEMENTS ].append( j )
self.filters[ SIMILAR_ELEMENTS ].remove( j )
def __checksort(self, x, y) :
return y in self.filters[SIMILARITY_SORT_ELEMENTS][ x ]
def _init_new_elements(self) :
# Check if some elements in the second file are totally new !
for j in self.filters[ELEMENTS][self.e2] :
# new elements can't be in similar elements
if j not in self.filters[SIMILAR_ELEMENTS] :
# new elements hashes can't be in first file
if j.getsha256() not in self.filters[HASHSUM][self.e1] :
ok = True
# new elements can't be compared to another one
for diff_element in self.filters[SIMILAR_ELEMENTS] :
if self.__checksort( diff_element, j ) :
ok = False
break
if ok :
if j.getsha256() not in self.filters[HASHSUM_NEW_ELEMENTS] :
self.filters[NEW_ELEMENTS].add( j )
self.filters[HASHSUM_NEW_ELEMENTS].append( j.getsha256() )
def get_similar_elements(self) :
""" Return the similar elements
@rtype : a list of elements
"""
return self.get_elem( SIMILAR_ELEMENTS )
def get_new_elements(self) :
""" Return the new elements
@rtype : a list of elements
"""
return self.get_elem( NEW_ELEMENTS )
def get_deleted_elements(self) :
""" Return the deleted elements
@rtype : a list of elements
"""
return self.get_elem( DELETED_ELEMENTS )
def get_internal_identical_elements(self, ce) :
""" Return the internal identical elements
@rtype : a list of elements
"""
return self.get_elem( INTERNAL_IDENTICAL_ELEMENTS )
def get_identical_elements(self) :
""" Return the identical elements
@rtype : a list of elements
"""
return self.get_elem( IDENTICAL_ELEMENTS )
def get_skipped_elements(self) :
return self.get_elem( SKIPPED_ELEMENTS )
def get_elem(self, attr) :
return [ x for x in self.filters[attr] ]
def show_element(self, i, details=True) :
print "\t", i.get_info()
if details :
if i.getsha256() == None :
pass
elif i.getsha256() in self.ref_set_els[self.e2]:
if len(self.ref_set_ident[self.e2][i.getsha256()]) > 1:
for ident in self.ref_set_ident[self.e2][i.getsha256()]:
print "\t\t-->", ident.get_info()
else:
print "\t\t-->", self.ref_set_els[self.e2][ i.getsha256() ].get_info()
else :
for j in self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ] :
print "\t\t-->", j.get_info(), self.filters[ SIMILARITY_ELEMENTS ][ i ][ j ]
def get_element_info(self, i) :
l = []
if i.getsha256() == None :
pass
elif i.getsha256() in self.ref_set_els[self.e2] :
l.append( [ i, self.ref_set_els[self.e2][ i.getsha256() ] ] )
else :
for j in self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ] :
l.append( [i, j, self.filters[ SIMILARITY_ELEMENTS ][ i ][ j ] ] )
return l
def get_associated_element(self, i) :
return list(self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ])[0]
def get_similarity_value(self, new=True) :
values = []
self.sim.set_compress_type( BZ2_COMPRESS )
for j in self.filters[SIMILAR_ELEMENTS] :
k = self.get_associated_element( j )
value = self.filters[BASE][FILTER_SIM_METH]( self.sim, j, k )
# filter value
value = self.filters[BASE][FILTER_SIM_VALUE_METH]( value )
values.append( value )
values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 0.0 ) for i in self.filters[IDENTICAL_ELEMENTS] ] )
if new == True :
values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 1.0 ) for i in self.filters[NEW_ELEMENTS] ] )
else :
values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 1.0 ) for i in self.filters[DELETED_ELEMENTS] ] )
self.sim.set_compress_type( self.compressor )
similarity_value = 0.0
for i in values :
similarity_value += (1.0 - i)
if len(values) == 0 :
return 0.0
return (similarity_value/len(values)) * 100
def show(self):
print "Elements:"
print "\t IDENTICAL:\t", len(self.get_identical_elements())
print "\t SIMILAR: \t", len(self.get_similar_elements())
print "\t NEW:\t\t", len(self.get_new_elements())
print "\t DELETED:\t", len(self.get_deleted_elements())
print "\t SKIPPED:\t", len(self.get_skipped_elements())
#self.sim.show()
ADDED_ELEMENTS = "added elements"
DELETED_ELEMENTS = "deleted elements"
LINK_ELEMENTS = "link elements"
DIFF = "diff"
class Eldiff :
def __init__(self, elsim, F) :
self.elsim = elsim
self.F = F
self._init_filters()
self._init_diff()
def _init_filters(self) :
self.filters = {}
self.filters[ BASE ] = {}
self.filters[ BASE ].update( self.F )
self.filters[ ELEMENTS ] = {}
self.filters[ ADDED_ELEMENTS ] = {}
self.filters[ DELETED_ELEMENTS ] = {}
self.filters[ LINK_ELEMENTS ] = {}
def _init_diff(self) :
for i, j in self.elsim.get_elements() :
self.filters[ ADDED_ELEMENTS ][ j ] = []
self.filters[ DELETED_ELEMENTS ][ i ] = []
x = self.filters[ BASE ][ DIFF ]( i, j )
self.filters[ ADDED_ELEMENTS ][ j ].extend( x.get_added_elements() )
self.filters[ DELETED_ELEMENTS ][ i ].extend( x.get_deleted_elements() )
self.filters[ LINK_ELEMENTS ][ j ] = i
#self.filters[ LINK_ELEMENTS ][ i ] = j
def show(self) :
for bb in self.filters[ LINK_ELEMENTS ] : #print "la"
print bb.get_info(), self.filters[ LINK_ELEMENTS ][ bb ].get_info()
print "Added Elements(%d)" % (len(self.filters[ ADDED_ELEMENTS ][ bb ]))
for i in self.filters[ ADDED_ELEMENTS ][ bb ] :
print "\t",
i.show()
print "Deleted Elements(%d)" % (len(self.filters[ DELETED_ELEMENTS ][ self.filters[ LINK_ELEMENTS ][ bb ] ]))
for i in self.filters[ DELETED_ELEMENTS ][ self.filters[ LINK_ELEMENTS ][ bb ] ] :
print "\t",
i.show()
print
def get_added_elements(self) :
return self.filters[ ADDED_ELEMENTS ]
def get_deleted_elements(self) :
return self.filters[ DELETED_ELEMENTS ]
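####
# Illustrative sketch (editorial addition): the shape of the filters dict that
# Elsim expects. The callables passed in are placeholders -- real filter
# implementations live in the element-specific modules of the project (e.g.
# for DEX methods or text); only the keys and the threshold are shown here.
def _demo_build_filters(element_meth, checksum_meth, sim_meth, sort_meth,
                        skipped_meth, sim_value_meth, threshold=0.2):
    return {
        FILTER_ELEMENT_METH: element_meth,      # wraps a raw element
        FILTER_CHECKSUM_METH: checksum_meth,    # checksum/signature of an element
        FILTER_SIM_METH: sim_meth,              # similarity between two elements
        FILTER_SORT_METH: sort_meth,            # keeps the closest candidates
        FILTER_SORT_VALUE: threshold,           # cut-off used by the sort method
        FILTER_SKIPPED_METH: skipped_meth,      # object exposing skip(element)
        FILTER_SIM_VALUE_METH: sim_value_meth,  # post-processes similarity values
    }
# A typical driver then builds Elsim(e1, e2, filters) and calls show() or
# get_similarity_value() on it.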
| lgpl-3.0 |
Gustry/GeoHealth | src/gui/analysis/parent_incidence_density_dialog.py | 1 | 13912 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
GeoHealth
A QGIS plugin
-------------------
begin : 2014-08-20
copyright : (C) 2014 by Etienne Trimaille
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from tempfile import NamedTemporaryFile
from qgis.PyQt.QtWidgets import QDialog, QDialogButtonBox, QTableWidgetItem, QApplication
from qgis.PyQt.QtCore import QSize, QVariant, Qt, pyqtSignal
from qgis.PyQt.QtWidgets import QFileDialog
from qgis.utils import Qgis
from qgis.core import \
QgsField,\
QgsGradientColorRamp,\
QgsGraduatedSymbolRenderer,\
QgsSymbol,\
QgsVectorFileWriter,\
QgsFeature,\
QgsVectorLayer,\
QgsProject,\
QgsMapLayerProxyModel,\
QgsGeometry
from matplotlib.backends.backend_qt4agg import \
FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from GeoHealth.src.core.graph_toolbar import CustomNavigationToolbar
from GeoHealth.src.core.tools import display_message_bar, tr
from GeoHealth.src.core.exceptions import \
GeoHealthException,\
NoLayerProvidedException,\
DifferentCrsException,\
FieldExistingException,\
FieldException,\
NotANumberException
from GeoHealth.src.core.stats import Stats
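# Illustrative sketch (editorial addition, not used by the dialog): the rate
# computed in run_stats() below is count / denominator * ratio, where the
# denominator is either the population field or the polygon area. The helper
# name, its bare-float interface and the default ratio are assumptions for
# illustration only.
def _example_rate(count, denominator, ratio=100000.0):
    try:
        return float(count) / float(denominator) * ratio
    except (ZeroDivisionError, TypeError, ValueError):
        return None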
class IncidenceDensityDialog(QDialog):
signalAskCloseWindow = pyqtSignal(int, name='signalAskCloseWindow')
signalStatus = pyqtSignal(int, str, name='signalStatus')
def __init__(self, parent=None):
"""Constructor.
Base class for Incidence and Density dialogs.
use_area : If you use the area of the polygon or the population field.
        use_point_layer : If you use a point layer, or a field in the polygon
layer.
"""
self.parent = parent
QDialog.__init__(self, parent)
self.name_field = None
self.admin_layer = None
self.figure = None
self.canvas = None
self.toolbar = None
self.output_file_path = None
self.output_layer = None
# Settings
self.use_area = None
self.use_point_layer = None
def setup_ui(self):
# Connect slot.
# noinspection PyUnresolvedReferences
self.button_browse.clicked.connect(self.open_file_browser)
self.button_box_ok.button(QDialogButtonBox.Ok).clicked.connect(
self.run_stats)
self.button_box_ok.button(QDialogButtonBox.Cancel).clicked.connect(
self.hide)
self.button_box_ok.button(QDialogButtonBox.Cancel).clicked.connect(
self.signalAskCloseWindow.emit)
# Add items in symbology
self.cbx_mode.addItem(
'Equal interval', QgsGraduatedSymbolRenderer.EqualInterval)
self.cbx_mode.addItem(
'Quantile (equal count)', QgsGraduatedSymbolRenderer.Quantile)
self.cbx_mode.addItem(
'Natural breaks', QgsGraduatedSymbolRenderer.Jenks)
self.cbx_mode.addItem(
'Standard deviation', QgsGraduatedSymbolRenderer.StdDev)
self.cbx_mode.addItem(
'Pretty breaks', QgsGraduatedSymbolRenderer.Pretty)
# Setup the graph.
self.figure = Figure()
self.canvas = FigureCanvas(self.figure)
self.canvas.setMinimumSize(QSize(300, 0))
self.toolbar = CustomNavigationToolbar(self.canvas, self)
self.layout_plot.addWidget(self.toolbar)
self.layout_plot.addWidget(self.canvas)
self.cbx_aggregation_layer.setFilters(
QgsMapLayerProxyModel.PolygonLayer)
if self.use_point_layer:
self.cbx_case_layer.setFilters(QgsMapLayerProxyModel.PointLayer)
if not self.use_area:
self.cbx_population_field.setLayer(
self.cbx_aggregation_layer.currentLayer())
self.cbx_aggregation_layer.layerChanged.connect(
self.cbx_population_field.setLayer)
self.cbx_aggregation_layer.layerChanged.connect(
self.reset_field_population)
self.reset_field_population()
if not self.use_point_layer:
self.cbx_case_field.setLayer(
self.cbx_aggregation_layer.currentLayer())
self.cbx_aggregation_layer.layerChanged.connect(
self.cbx_case_field.setLayer)
self.cbx_aggregation_layer.layerChanged.connect(
self.reset_field_case)
self.reset_field_case()
def reset_field_population(self):
self.cbx_population_field.setCurrentIndex(0)
def reset_field_case(self):
self.cbx_case_field.setCurrentIndex(0)
def open_file_browser(self):
output_file, __ = QFileDialog.getSaveFileName(
self.parent, tr('Save shapefile'), filter='SHP (*.shp)')
        self.le_output_filepath.setText(output_file)
def run_stats(self):
"""Main function which do the process."""
# Get the common fields.
self.admin_layer = self.cbx_aggregation_layer.currentLayer()
if self.use_point_layer:
# If we use a point layer.
point_layer = self.cbx_case_layer.currentLayer()
else:
# If we use a column with number of case.
case_column = self.cbx_case_field.currentField()
index_case = self.admin_layer.fieldNameIndex(case_column)
if not self.use_area:
# If we don't use density.
population = self.cbx_population_field.currentField()
index_population = self.admin_layer.fieldNameIndex(population)
if not self.name_field:
self.name_field = self.le_new_column.placeholderText()
# Add new column.
add_nb_intersections = self.checkBox_addNbIntersections.isChecked()
# Ratio
ratio = self.cbx_ratio.currentText()
ratio = ratio.replace(' ', '')
# Output.
self.output_file_path = self.le_output_filepath.text()
try:
self.button_box_ok.setDisabled(True)
# noinspection PyArgumentList
QApplication.setOverrideCursor(Qt.WaitCursor)
# noinspection PyArgumentList
QApplication.processEvents()
if not self.admin_layer:
raise NoLayerProvidedException
if not self.admin_layer and self.use_point_layer:
raise NoLayerProvidedException
crs_admin_layer = self.admin_layer.crs()
if self.use_point_layer:
crs_point_layer = point_layer.crs()
if crs_admin_layer != crs_point_layer:
raise DifferentCrsException(
epsg1=crs_point_layer.authid(),
epsg2=crs_admin_layer.authid())
if not self.use_point_layer and not self.use_area:
if index_population == index_case:
raise FieldException(field_1='Population', field_2='Case')
try:
ratio = float(ratio)
except ValueError:
raise NotANumberException(suffix=ratio)
# Output
if not self.output_file_path:
temp_file = NamedTemporaryFile(
delete=False,
suffix='-geohealth.shp')
self.output_file_path = temp_file.name
temp_file.flush()
temp_file.close()
admin_layer_provider = self.admin_layer.dataProvider()
fields = admin_layer_provider.fields()
if admin_layer_provider.fieldNameIndex(self.name_field) != -1:
raise FieldExistingException(field=self.name_field)
fields.append(QgsField(self.name_field, QVariant.Double))
if add_nb_intersections:
fields.append(QgsField('nb_of_intersections', QVariant.Int))
data = []
file_writer = QgsVectorFileWriter(
self.output_file_path,
'utf-8',
fields,
Qgis.WKBPolygon,
self.admin_layer.crs(),
'ESRI Shapefile')
if self.use_point_layer:
total_case = point_layer.featureCount()
else:
total_case = 0
for i, feature in enumerate(self.admin_layer.getFeatures()):
attributes = feature.attributes()
if self.use_point_layer:
count = 0
for f in point_layer.getFeatures():
if f.geometry().intersects(feature.geometry()):
count += 1
else:
count = int(attributes[index_case])
total_case += count
try:
if self.use_area:
area = feature.geometry().area()
value = float(count) / area * ratio
else:
try:
population = float(attributes[index_population])
except ValueError:
raise NotANumberException(
suffix=attributes[index_population])
value = float(count) / population * ratio
except ZeroDivisionError:
value = None
except TypeError:
value = None
data.append(value)
attributes.append(value)
if add_nb_intersections:
attributes.append(count)
new_feature = QgsFeature()
new_geom = QgsGeometry(feature.geometry())
new_feature.setAttributes(attributes)
new_feature.setGeometry(new_geom)
file_writer.addFeature(new_feature)
del file_writer
self.output_layer = QgsVectorLayer(
self.output_file_path,
self.name_field,
'ogr')
QgsProject.instance().addMapLayer(self.output_layer)
if self.checkBox_incidence_runStats.isChecked():
stats = Stats(data)
items_stats = [
'Incidence null,%d' % stats.null_values(),
'Count(point),%d' % total_case,
'Count(polygon),%d' % self.admin_layer.featureCount(),
'Min,%d' % stats.min(),
'Average,%f' % stats.average(),
'Max,%d' % stats.max(),
'Median,%f' % stats.median(),
'Range,%d' % stats.range(),
'Variance,%f' % stats.variance(),
'Standard deviation,%f' % stats.standard_deviation()
]
self.tableWidget.clear()
self.tableWidget.setColumnCount(2)
labels = ['Parameters', 'Values']
self.tableWidget.setHorizontalHeaderLabels(labels)
self.tableWidget.setRowCount(len(items_stats))
for i, item in enumerate(items_stats):
s = item.split(',')
self.tableWidget.setItem(i, 0, QTableWidgetItem(s[0]))
self.tableWidget.setItem(i, 1, QTableWidgetItem(s[1]))
self.tableWidget.resizeRowsToContents()
self.draw_plot(data)
else:
self.hide()
if self.symbology.isChecked():
self.add_symbology()
self.signalStatus.emit(3, tr('Successful process'))
except GeoHealthException as e:
display_message_bar(msg=e.msg, level=e.level, duration=e.duration)
finally:
self.button_box_ok.setDisabled(False)
# noinspection PyArgumentList
QApplication.restoreOverrideCursor()
# noinspection PyArgumentList
QApplication.processEvents()
def draw_plot(self, data):
"""Function to draw the plot and display it in the canvas.
:param data: The data to display
:type data: list
"""
ax = self.figure.add_subplot(111)
ax.hold(False)
ax.plot(data, '*-')
ax.set_xlabel('Polygon')
ax.set_ylabel(self.name_field)
ax.grid()
self.canvas.draw()
def add_symbology(self):
low_color = self.color_low_value.color()
high_color = self.color_high_value.color()
index = self.cbx_mode.currentIndex()
mode = self.cbx_mode.itemData(index)
classes = self.spinBox_classes.value()
# Compute renderer
# noinspection PyArgumentList
symbol = QgsSymbol.defaultSymbol(Qgis.Polygon)
color_ramp = QgsGradientColorRamp(low_color, high_color)
# noinspection PyArgumentList
renderer = QgsGraduatedSymbolRenderer.createRenderer(
self.output_layer,
self.name_field,
classes,
mode,
symbol,
color_ramp)
self.output_layer.setRenderer(renderer)
| gpl-3.0 |
liberatorqjw/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 28 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=["bradypus_variegatus_0",
"microryzomys_minutus_0"]):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
DH-Box/DH-Box.github.io | docs/tutorial/sampledoc/sphinxext/inheritance_diagram.py | 98 | 13648 | """
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
    .. inheritance-diagram:: D E
Produces a graph like the following:
A
/ \
B C
/ \ /
E D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
"""
import inspect
import os
import re
import subprocess
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils.nodes import Body, Element
from docutils.parsers.rst import directives
from sphinx.roles import xfileref_role
def my_import(name):
"""Module importer - taken from the python documentation.
This function allows importing names with dots in them."""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
class DotException(Exception):
pass
class InheritanceGraph(object):
"""
Given a list of classes, determines the set of classes that
they inherit from all the way to the root "object", and then
is able to generate a graphviz dot graph from them.
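    Example (a rough sketch; ``mymodule.MyClass`` is only a placeholder for a
    real importable class, and graphviz's ``dot`` is needed for
    :meth:`run_dot` but not for :meth:`generate_dot`)::

        graph = InheritanceGraph(['mymodule.MyClass'])
        print graph.get_all_class_names()
        with open('diagram.dot', 'w') as fd:
            graph.generate_dot(fd, 'diagram')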
"""
def __init__(self, class_names, show_builtins=False):
"""
*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
self.classes = self._import_classes(class_names)
self.all_classes = self._all_classes(self.classes)
if len(self.all_classes) == 0:
raise ValueError("No classes found for inheritance diagram")
self.show_builtins = show_builtins
py_sig_re = re.compile(r'''^([\w.]*\.)? # class names
(\w+) \s* $ # optionally arguments
''', re.VERBOSE)
def _import_class_or_module(self, name):
"""
Import a class using its fully-qualified *name*.
"""
try:
path, base = self.py_sig_re.match(name).groups()
except:
raise ValueError(
"Invalid class or module '%s' specified for inheritance diagram" % name)
fullname = (path or '') + base
path = (path and path.rstrip('.'))
if not path:
path = base
try:
module = __import__(path, None, None, [])
# We must do an import of the fully qualified name. Otherwise if a
# subpackage 'a.b' is requested where 'import a' does NOT provide
# 'a.b' automatically, then 'a.b' will not be found below. This
# second call will force the equivalent of 'import a.b' to happen
# after the top-level import above.
my_import(fullname)
except ImportError:
raise ValueError(
"Could not import class or module '%s' specified for inheritance diagram" % name)
try:
todoc = module
for comp in fullname.split('.')[1:]:
todoc = getattr(todoc, comp)
except AttributeError:
raise ValueError(
"Could not find class or module '%s' specified for inheritance diagram" % name)
# If a class, just return it
if inspect.isclass(todoc):
return [todoc]
elif inspect.ismodule(todoc):
classes = []
for cls in todoc.__dict__.values():
if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
classes.append(cls)
return classes
raise ValueError(
"'%s' does not resolve to a class or module" % name)
def _import_classes(self, class_names):
"""
Import a list of classes.
"""
classes = []
for name in class_names:
classes.extend(self._import_class_or_module(name))
return classes
def _all_classes(self, classes):
"""
Return a list of all classes that are ancestors of *classes*.
"""
all_classes = {}
def recurse(cls):
all_classes[cls] = None
for c in cls.__bases__:
if c not in all_classes:
recurse(c)
for cls in classes:
recurse(cls)
return all_classes.keys()
def class_name(self, cls, parts=0):
"""
Given a class object, return a fully-qualified name. This
works for things I've tested in matplotlib so far, but may not
be completely general.
"""
module = cls.__module__
if module == '__builtin__':
fullname = cls.__name__
else:
fullname = "%s.%s" % (module, cls.__name__)
if parts == 0:
return fullname
name_parts = fullname.split('.')
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
"""
Get all of the class names involved in the graph.
"""
return [self.class_name(x) for x in self.all_classes]
# These are the default options for graphviz
default_graph_options = {
"rankdir": "LR",
"size": '"8.0, 12.0"'
}
default_node_options = {
"shape": "box",
"fontsize": 10,
"height": 0.25,
"fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans",
"style": '"setlinewidth(0.5)"'
}
default_edge_options = {
"arrowsize": 0.5,
"style": '"setlinewidth(0.5)"'
}
def _format_node_options(self, options):
return ','.join(["%s=%s" % x for x in options.items()])
def _format_graph_options(self, options):
return ''.join(["%s=%s;\n" % x for x in options.items()])
def generate_dot(self, fd, name, parts=0, urls={},
graph_options={}, node_options={},
edge_options={}):
"""
Generate a graphviz dot graph from the classes that
were passed in to __init__.
*fd* is a Python file-like object to write to.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
*graph_options*, *node_options*, *edge_options* are
dictionaries containing key/value pairs to pass on as graphviz
properties.
"""
g_options = self.default_graph_options.copy()
g_options.update(graph_options)
n_options = self.default_node_options.copy()
n_options.update(node_options)
e_options = self.default_edge_options.copy()
e_options.update(edge_options)
fd.write('digraph %s {\n' % name)
fd.write(self._format_graph_options(g_options))
for cls in self.all_classes:
if not self.show_builtins and cls in __builtins__.values():
continue
name = self.class_name(cls, parts)
# Write the node
this_node_options = n_options.copy()
url = urls.get(self.class_name(cls))
if url is not None:
this_node_options['URL'] = '"%s"' % url
fd.write(' "%s" [%s];\n' %
(name, self._format_node_options(this_node_options)))
# Write the edges
for base in cls.__bases__:
if not self.show_builtins and base in __builtins__.values():
continue
base_name = self.class_name(base, parts)
fd.write(' "%s" -> "%s" [%s];\n' %
(base_name, name,
self._format_node_options(e_options)))
fd.write('}\n')
def run_dot(self, args, name, parts=0, urls={},
graph_options={}, node_options={}, edge_options={}):
"""
Run graphviz 'dot' over this graph, returning whatever 'dot'
writes to stdout.
*args* will be passed along as commandline arguments.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
Raises DotException for any of the many os and
installation-related errors that may occur.
"""
try:
dot = subprocess.Popen(['dot'] + list(args),
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=True)
except OSError:
raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?")
except ValueError:
raise DotException("'dot' called with invalid arguments")
except:
raise DotException("Unexpected error calling 'dot'")
self.generate_dot(dot.stdin, name, parts, urls, graph_options,
node_options, edge_options)
dot.stdin.close()
result = dot.stdout.read()
returncode = dot.wait()
if returncode != 0:
raise DotException("'dot' returned the errorcode %d" % returncode)
return result
class inheritance_diagram(Body, Element):
"""
A docutils node to use as a placeholder for the inheritance
diagram.
"""
pass
def inheritance_diagram_directive(name, arguments, options, content, lineno,
content_offset, block_text, state,
state_machine):
"""
Run when the inheritance_diagram directive is first encountered.
"""
node = inheritance_diagram()
class_names = arguments
# Create a graph starting with the list of classes
graph = InheritanceGraph(class_names)
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, x = xfileref_role(
'class', ':class:`%s`' % name, name, 0, state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
# Store the original content for use as a hash
node['parts'] = options.get('parts', 0)
node['content'] = " ".join(class_names)
return [node]
def get_graph_hash(node):
return md5(node['content'] + str(node['parts'])).hexdigest()[-10:]
def html_output_graph(self, node):
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
path = '_images'
dest_path = os.path.join(setup.app.builder.outdir, path)
if not os.path.exists(dest_path):
os.makedirs(dest_path)
png_path = os.path.join(dest_path, name + ".png")
path = setup.app.builder.imgpath
# Create a mapping from fully-qualified class names to URLs.
urls = {}
for child in node:
if child.get('refuri') is not None:
urls[child['reftitle']] = child.get('refuri')
elif child.get('refid') is not None:
urls[child['reftitle']] = '#' + child.get('refid')
# These arguments to dot will save a PNG file to disk and write
# an HTML image map to stdout.
image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],
name, parts, urls)
return ('<img src="%s/%s.png" usemap="#%s" class="inheritance"/>%s' %
(path, name, name, image_map))
def latex_output_graph(self, node):
"""
Output the graph for LaTeX. This will insert a PDF.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images'))
if not os.path.exists(dest_path):
os.makedirs(dest_path)
pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf"))
graph.run_dot(['-Tpdf', '-o%s' % pdf_path],
name, parts, graph_options={'size': '"6.0,6.0"'})
return '\n\\includegraphics{%s}\n\n' % pdf_path
def visit_inheritance_diagram(inner_func):
"""
This is just a wrapper around html/latex_output_graph to make it
easier to handle errors and insert warnings.
"""
def visitor(self, node):
try:
content = inner_func(self, node)
except DotException, e:
# Insert the exception as a warning in the document
warning = self.document.reporter.warning(str(e), line=node.line)
warning.parent = node
node.children = [warning]
else:
source = self.document.attributes['source']
self.body.append(content)
node.children = []
return visitor
def do_nothing(self, node):
pass
def setup(app):
setup.app = app
setup.confdir = app.confdir
app.add_node(
inheritance_diagram,
latex=(visit_inheritance_diagram(latex_output_graph), do_nothing),
html=(visit_inheritance_diagram(html_output_graph), do_nothing))
app.add_directive(
'inheritance-diagram', inheritance_diagram_directive,
False, (1, 100, 0), parts = directives.nonnegative_int)
| apache-2.0 |
arahuja/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
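# A note on what is plotted next: the fitted mixture models the density as
#
#     p(x) = sum_k w_k * N(x | mu_k, Sigma_k)
#
# and Z below holds the negative log-likelihood -log p(x) on a grid, so the
# contour levels trace regions of equal estimated density (lower = denser).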
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
jart/tensorflow | tensorflow/contrib/timeseries/examples/multivariate.py | 67 | 5155 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A multivariate TFTS example.
Fits a multivariate model, exports it, and visualizes the learned correlations
by iteratively predicting and sampling from the predictions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import tempfile
import numpy
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_level.csv")
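# Rough outline of multivariate_train_and_sample() below:
#   1. train a StructuralEnsembleRegressor on the five-series CSV,
#   2. run the filter over the whole dataset once to obtain a model state,
#   3. export the model as a SavedModel, then repeatedly
#      predict one step ahead -> sample from the predicted multivariate
#      Gaussian -> filter that sample back in, so the generated series
#      reflect the learned cross-series covariance.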
def multivariate_train_and_sample(
csv_file_name=_DATA_FILE, export_directory=None, training_steps=500):
"""Trains, evaluates, and exports a multivariate model."""
estimator = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=[], num_features=5)
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
+ (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5))
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
# Larger window sizes generally produce a better covariance matrix.
reader, batch_size=4, window_size=64)
estimator.train(input_fn=train_input_fn, steps=training_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
current_state = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
values = [current_state["observed"]]
times = [current_state[tf.contrib.timeseries.FilteringResults.TIMES]]
# Export the model so we can do iterative prediction and filtering without
# reloading model checkpoints.
if export_directory is None:
export_directory = tempfile.mkdtemp()
input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
export_location = estimator.export_savedmodel(
export_directory, input_receiver_fn)
with tf.Graph().as_default():
numpy.random.seed(1) # Make the example a bit more deterministic
with tf.Session() as session:
signatures = tf.saved_model.loader.load(
session, [tf.saved_model.tag_constants.SERVING], export_location)
for _ in range(100):
current_prediction = (
tf.contrib.timeseries.saved_model_utils.predict_continuation(
continue_from=current_state, signatures=signatures,
session=session, steps=1))
next_sample = numpy.random.multivariate_normal(
# Squeeze out the batch and series length dimensions (both 1).
mean=numpy.squeeze(current_prediction["mean"], axis=[0, 1]),
cov=numpy.squeeze(current_prediction["covariance"], axis=[0, 1]))
# Update model state so that future predictions are conditional on the
# value we just sampled.
filtering_features = {
tf.contrib.timeseries.TrainEvalFeatures.TIMES: current_prediction[
tf.contrib.timeseries.FilteringResults.TIMES],
tf.contrib.timeseries.TrainEvalFeatures.VALUES: next_sample[
None, None, :]}
current_state = (
tf.contrib.timeseries.saved_model_utils.filter_continuation(
continue_from=current_state,
session=session,
signatures=signatures,
features=filtering_features))
values.append(next_sample[None, None, :])
times.append(current_state["times"])
all_observations = numpy.squeeze(numpy.concatenate(values, axis=1), axis=0)
all_times = numpy.squeeze(numpy.concatenate(times, axis=1), axis=0)
return all_times, all_observations
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
all_times, all_observations = multivariate_train_and_sample()
# Show where sampling starts on the plot
pyplot.axvline(1000, linestyle="dotted")
pyplot.plot(all_times, all_observations)
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
| apache-2.0 |
jhamrick/plotchecker | plotchecker/scatterplot.py | 1 | 18406 | import numpy as np
from .base import PlotChecker, InvalidPlotError
class ScatterPlotChecker(PlotChecker):
"""A plot checker for scatter plots.
Parameters
----------
axis : ``matplotlib.axes.Axes`` object
A set of matplotlib axes (e.g. obtained through ``plt.gca()``)
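    Examples
    --------
    A minimal usage sketch (assumes a scatter plot has already been drawn;
    the data values here are arbitrary)::

        import matplotlib.pyplot as plt
        plt.scatter([1, 2, 3], [4, 5, 6], c='red', s=36)
        pc = ScatterPlotChecker(plt.gca())
        pc.assert_num_points(3)
        pc.assert_x_data_equal([1, 2, 3])
        pc.assert_colors_equal('red')
        pc.assert_sizes_equal(36)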
"""
def __init__(self, axis):
"""Initialize the scatter plot checker."""
super(ScatterPlotChecker, self).__init__(axis)
self.lines = self.axis.get_lines()
self.collections = self.axis.collections
# check that there are only lines or collections, not both
if len(self.lines) == 0 and len(self.collections) == 0:
raise InvalidPlotError("No data found")
# check that if there are lines, linestyle is '' and markers are not ''
for x in self.lines:
if len(x.get_xydata()) > 1 and x.get_linestyle() != 'None':
raise InvalidPlotError("This is supposed to be a scatter plot, but it has lines!")
if self._parse_marker(x.get_marker()) == '':
raise InvalidPlotError("This is supposed to be a scatter plot, but there are no markers!")
def _parse_expected_attr(self, attr_name, attr_val):
"""Ensure that the given expected attribute values are in the right shape."""
if attr_name in ('colors', 'edgecolors'):
# if it's a color, first check if it's just a single color -- if it's
# not a single color, this command will throw an error and we can try
# iterating over the multiple colors that were given
try:
attr_val = np.array([self._color2rgb(attr_val)])
except (ValueError, TypeError):
attr_val = np.array([self._color2rgb(x) for x in attr_val])
elif not hasattr(attr_val, '__iter__'):
# if it's not a color, then just make sure we have an array
attr_val = np.array([attr_val])
# tile the given values if we've only been given one, so it's the same
# shape as the data
if len(attr_val) == 1:
attr_val = self._tile_or_trim(self.x_data, attr_val)
return attr_val
def assert_num_points(self, num_points):
"""Assert that the plot has the given number of points.
Parameters
----------
num_points : int
"""
if num_points != len(self.x_data):
raise AssertionError(
"Plot has incorrect number of points: {} (expected {})".format(
len(self.x_data), num_points))
@property
def x_data(self):
"""The x-values of the plotted data (1-D array)."""
all_x_data = []
if len(self.lines) > 0:
all_x_data.append(np.concatenate([x.get_xydata()[:, 0] for x in self.lines]))
if len(self.collections) > 0:
all_x_data.append(np.concatenate([x.get_offsets()[:, 0] for x in self.collections]))
return np.concatenate(all_x_data, axis=0)
def assert_x_data_equal(self, x_data):
"""Assert that the given x-data is equivalent to the plotted
:attr:`~plotchecker.ScatterPlotChecker.x_data`.
Parameters
----------
x_data : 1-D array-like
The expected x-data. The number of elements should be equal to the
(expected) number of plotted points.
"""
np.testing.assert_equal(self.x_data, x_data)
def assert_x_data_allclose(self, x_data, **kwargs):
"""Assert that the given x-data is almost equal to the plotted
:attr:`~plotchecker.ScatterPlotChecker.x_data`.
Parameters
----------
x_data : 1-D array-like
The expected x-data. The number of elements should be equal to the
(expected) number of plotted points.
kwargs :
Additional keyword arguments to pass to
``numpy.testing.assert_allclose``
"""
np.testing.assert_allclose(self.x_data, x_data, **kwargs)
@property
def y_data(self):
"""The y-values of the plotted data (1-D array)."""
all_y_data = []
if len(self.lines) > 0:
all_y_data.append(np.concatenate([x.get_xydata()[:, 1] for x in self.lines]))
if len(self.collections) > 0:
all_y_data.append(np.concatenate([x.get_offsets()[:, 1] for x in self.collections]))
return np.concatenate(all_y_data, axis=0)
def assert_y_data_equal(self, y_data):
"""Assert that the given y-data is equivalent to the plotted
:attr:`~plotchecker.ScatterPlotChecker.y_data`.
Parameters
----------
y_data : 1-D array-like
The expected y-data. The number of elements should be equal to the
(expected) number of plotted points.
"""
np.testing.assert_equal(self.y_data, y_data)
def assert_y_data_allclose(self, y_data, **kwargs):
"""Assert that the given y-data is almost equal to the plotted
:attr:`~plotchecker.ScatterPlotChecker.y_data`.
Parameters
----------
y_data : 1-D array-like
The expected y-data. The number of elements should be equal to the
(expected) number of plotted points.
kwargs :
Additional keyword arguments to pass to
``numpy.testing.assert_allclose``
"""
np.testing.assert_allclose(self.y_data, y_data, **kwargs)
@property
def colors(self):
"""The colors of the plotted points. Columns correspond to RGB values."""
all_colors = []
if len(self.lines) > 0:
for x in self.lines:
points = x.get_xydata()
colors = np.array([self._color2rgb(x.get_markerfacecolor())])
all_colors.append(self._tile_or_trim(points, colors))
if len(self.collections) > 0:
for x in self.collections:
points = x.get_offsets()
colors = np.array([self._color2rgb(i) for i in x.get_facecolors()])
all_colors.append(self._tile_or_trim(points, colors))
return np.concatenate(all_colors, axis=0)
def assert_colors_equal(self, colors):
"""Assert that the given colors are equivalent to the plotted
:attr:`~plotchecker.ScatterPlotChecker.colors`.
Parameters
----------
colors : single color, or list of expected line colors
Each color can be either a matplotlib color name (e.g. ``'r'`` or
``'red'``), a hexcode (e.g. ``"#FF0000"``), a 3-tuple RGB color, or
a 4-tuple RGBA color.
"""
np.testing.assert_equal(
self.colors,
self._parse_expected_attr("colors", colors))
def assert_colors_allclose(self, colors, **kwargs):
"""Assert that the given colors are almost equal to the plotted
:attr:`~plotchecker.ScatterPlotChecker.colors`.
Parameters
----------
colors : single color, or list of expected line colors
Each color can be either a matplotlib color name (e.g. ``'r'`` or
``'red'``), a hexcode (e.g. ``"#FF0000"``), a 3-tuple RGB color, or
a 4-tuple RGBA color.
kwargs :
Additional keyword arguments to pass to
``numpy.testing.assert_allclose``
"""
np.testing.assert_allclose(
self.colors,
self._parse_expected_attr("colors", colors),
**kwargs)
@property
def alphas(self):
"""The alpha values of the plotted points."""
all_alphas = []
if len(self.lines) > 0:
for x in self.lines:
points = x.get_xydata()
if x.get_alpha() is None:
alpha = np.array([self._color2alpha(x.get_markerfacecolor())])
else:
alpha = np.array([x.get_alpha()])
all_alphas.append(self._tile_or_trim(points, alpha))
if len(self.collections) > 0:
for x in self.collections:
points = x.get_offsets()
if x.get_alpha() is None:
alpha = np.array([self._color2alpha(i) for i in x.get_facecolors()])
else:
alpha = np.array([x.get_alpha()])
all_alphas.append(self._tile_or_trim(points, alpha))
return np.concatenate(all_alphas)
def assert_alphas_equal(self, alphas):
"""Assert that the given alpha values are equivalent to the plotted
:attr:`~plotchecker.ScatterPlotChecker.alphas`.
Parameters
----------
alphas :
The expected alpha values. This should either be a single number
(which will apply to all the points) or an array with size equal to
the number of (expected) points.
"""
np.testing.assert_equal(
self.alphas, self._parse_expected_attr("alphas", alphas))
def assert_alphas_allclose(self, alphas, **kwargs):
"""Assert that the given alpha values are almost equal to the plotted
:attr:`~plotchecker.ScatterPlotChecker.alphas`.
Parameters
----------
alphas :
The expected alpha values. This should either be a single number
(which will apply to all the points) or an array with size equal to
the number of (expected) points.
kwargs :
Additional keyword arguments to pass to
``numpy.testing.assert_allclose``
"""
np.testing.assert_allclose(
self.alphas,
self._parse_expected_attr("alphas", alphas),
**kwargs)
@property
def edgecolors(self):
"""The edge colors of the plotted points. Columns correspond to RGB values."""
all_colors = []
if len(self.lines) > 0:
for x in self.lines:
points = x.get_xydata()
colors = np.array([self._color2rgb(x.get_markeredgecolor())])
all_colors.append(self._tile_or_trim(points, colors))
if len(self.collections) > 0:
for x in self.collections:
points = x.get_offsets()
colors = np.array([self._color2rgb(i) for i in x.get_edgecolors()])
all_colors.append(self._tile_or_trim(points, colors))
return np.concatenate(all_colors, axis=0)
def assert_edgecolors_equal(self, edgecolors):
"""Assert that the given edge colors are equivalent to the plotted
:attr:`~plotchecker.ScatterPlotChecker.edgecolors`.
Parameters
----------
edgecolors : single color, or list of expected edge colors
Each color can be either a matplotlib color name (e.g. ``'r'`` or
``'red'``), a hexcode (e.g. ``"#FF0000"``), a 3-tuple RGB color, or
a 4-tuple RGBA color.
"""
np.testing.assert_equal(
self.edgecolors,
self._parse_expected_attr("edgecolors", edgecolors))
def assert_edgecolors_allclose(self, edgecolors, **kwargs):
"""Assert that the given edge colors are almost equal to the plotted
:attr:`~plotchecker.ScatterPlotChecker.edgecolors`.
Parameters
----------
edgecolors : single color, or list of expected edge colors
Each color can be either a matplotlib color name (e.g. ``'r'`` or
``'red'``), a hexcode (e.g. ``"#FF0000"``), a 3-tuple RGB color, or
a 4-tuple RGBA color.
kwargs :
Additional keyword arguments to pass to
``numpy.testing.assert_allclose``
"""
np.testing.assert_allclose(
self.edgecolors,
self._parse_expected_attr("edgecolors", edgecolors),
**kwargs)
@property
def edgewidths(self):
"""The edge widths of the plotted points."""
all_colors = []
if len(self.lines) > 0:
for x in self.lines:
points = x.get_xydata()
colors = np.array([x.get_markeredgewidth()])
all_colors.append(self._tile_or_trim(points, colors))
if len(self.collections) > 0:
for x in self.collections:
points = x.get_offsets()
colors = np.array(x.get_linewidths())
all_colors.append(self._tile_or_trim(points, colors))
return np.concatenate(all_colors, axis=0)
def assert_edgewidths_equal(self, edgewidths):
"""Assert that the given edge widths are equivalent to the plotted
:attr:`~plotchecker.ScatterPlotChecker.edgewidths`.
Parameters
----------
edgewidths :
The expected edge widths. This should either be a single number
(which will apply to all the points) or an array with size equal to
the number of (expected) points.
"""
np.testing.assert_equal(
self.edgewidths,
self._parse_expected_attr("edgewidths", edgewidths))
def assert_edgewidths_allclose(self, edgewidths, **kwargs):
"""Assert that the given edge widths are almost equal to the plotted
:attr:`~plotchecker.ScatterPlotChecker.edgewidths`.
Parameters
----------
edgewidths :
The expected edge widths. This should either be a single number
(which will apply to all the points) or an array with size equal to
the number of (expected) points.
kwargs :
Additional keyword arguments to pass to
``numpy.testing.assert_allclose``
"""
np.testing.assert_allclose(
self.edgewidths,
self._parse_expected_attr("edgewidths", edgewidths),
**kwargs)
@property
def sizes(self):
"""The size of the plotted points. This is the square of
:attr:`~plotchecker.ScatterPlotChecker.markersizes`.
"""
all_sizes = []
if len(self.lines) > 0:
for x in self.lines:
points = x.get_xydata()
sizes = np.array([x.get_markersize() ** 2])
all_sizes.append(self._tile_or_trim(points, sizes))
if len(self.collections) > 0:
for x in self.collections:
points = x.get_offsets()
sizes = x.get_sizes()
all_sizes.append(self._tile_or_trim(points, sizes))
return np.concatenate(all_sizes, axis=0)
def assert_sizes_equal(self, sizes):
"""Assert that the given point sizes are equivalent to the plotted
:attr:`~plotchecker.ScatterPlotChecker.sizes`.
Parameters
----------
sizes :
The expected point sizes. This should either be a single number
(which will apply to all the points) or an array with size equal to
the number of (expected) points.
"""
np.testing.assert_equal(
self.sizes,
self._parse_expected_attr("sizes", sizes))
def assert_sizes_allclose(self, sizes, **kwargs):
"""Assert that the given point sizes are almost equal to the plotted
:attr:`~plotchecker.ScatterPlotChecker.sizes`.
Parameters
----------
sizes :
The expected point sizes. This should either be a single number
(which will apply to all the points) or an array with size equal to
the number of (expected) points.
kwargs :
Additional keyword arguments to pass to
``numpy.testing.assert_allclose``
"""
np.testing.assert_allclose(
self.sizes,
self._parse_expected_attr("sizes", sizes),
**kwargs)
@property
def markersizes(self):
"""The marker size of the plotted points. This is the square root of
:attr:`~plotchecker.ScatterPlotChecker.sizes`.
"""
return np.sqrt(self.sizes)
def assert_markersizes_equal(self, markersizes):
"""Assert that the given marker sizes are equivalent to the plotted
:attr:`~plotchecker.ScatterPlotChecker.markersizes`.
Parameters
----------
markersizes :
The expected marker sizes. This should either be a single number
(which will apply to all the points) or an array with size equal to
the number of (expected) points.
"""
np.testing.assert_equal(
self.markersizes,
self._parse_expected_attr("markersizes", markersizes))
def assert_markersizes_allclose(self, markersizes, **kwargs):
"""Assert that the given marker sizes are almost equal to the plotted
:attr:`~plotchecker.ScatterPlotChecker.markersizes`.
Parameters
----------
markersizes :
The expected marker sizes. This should either be a single number
(which will apply to all the points) or an array with size equal to
the number of (expected) points.
kwargs :
Additional keyword arguments to pass to
``numpy.testing.assert_allclose``
"""
np.testing.assert_allclose(
self.markersizes,
self._parse_expected_attr("markersizes", markersizes),
**kwargs)
@property
def markers(self):
"""The marker styles of the plotted points. Unfortunately, this
information is currently unrecoverable from matplotlib, and so this
attribute is not actually implemented.
"""
raise NotImplementedError("markers are unrecoverable for scatter plots")
def assert_markers_equal(self, markers):
"""Assert that the given marker styles are equivalent to the plotted
:attr:`~plotchecker.ScatterPlotChecker.markers`.
Note: information about marker style is currently unrecoverable from
collections in matplotlib, so this method is not actually implemented.
Parameters
----------
markers :
The expected marker styles. This should either be a single style
(which will apply to all the points) or an array with size equal to
the number of (expected) points.
"""
np.testing.assert_equal(
self.markers, self._parse_expected_attr("markers", markers))
| bsd-3-clause |
trustedanalytics/spark-tk | regression-tests/sparktkregtests/testcases/dicom/dicom_drop_tag_test.py | 11 | 8612 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests dicom.drop by tag functionality"""
import unittest
from sparktkregtests.lib import sparktk_test
import numpy
import random
from lxml import etree
class DicomDropTagsTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""import dicom data for testing"""
super(DicomDropTagsTest, self).setUp()
self.dataset = self.get_file("dicom_uncompressed")
self.dicom = self.context.dicom.import_dcm(self.dataset)
self.xml_directory = "../../../datasets/dicom/dicom_uncompressed/xml/"
self.image_directory = "../../../datasets/dicom/dicom_uncompressed/imagedata/"
self.query = ".//DicomAttribute[@keyword='KEYWORD']/Value/text()"
self.element_query = ".//DicomAttribute[@keyword='KEYWORD']"
self.count = self.dicom.metadata.count()
def test_drop_one_column_one_result_basic(self):
"""test drop with one unique key"""
# get the pandas frame for ease of access
metadata = self.dicom.metadata.to_pandas()
# grab a random row and extract the SOPInstanceUID from that record
random_row_index = random.randint(0, self.dicom.metadata.count() - 1)
random_row = metadata["metadata"][random_row_index]
xml_data = etree.fromstring(random_row.encode("ascii", "ignore"))
random_row_sopi_id = xml_data.xpath(self.query.replace("KEYWORD", "SOPInstanceUID"))[0]
expected_result = self._drop({"SOPInstanceUID": random_row_sopi_id})
        # drop the record with our randomly selected sopinstanceuid;
        # since sopinstanceuid is supposed to be unique for each record,
        # exactly one record should be removed
tag_number = xml_data.xpath(self.element_query.replace("KEYWORD", "SOPInstanceUID"))[0].get("tag")
self.dicom.drop_rows_by_tags({tag_number: random_row_sopi_id})
        # check that our result is correct: the randomly selected row should
        # have been dropped, leaving one fewer record, and the remaining rows
        # should match the expected result computed above
self.assertEqual(self.dicom.metadata.count(), self.count - 1)
self._compare_dicom_with_expected_result(expected_result)
def test_drop_one_col_multi_result_basic(self):
"""test drop by tag with one tag mult record result"""
metadata = self.dicom.metadata.to_pandas()
# get the first row and extract the patient id element from the metadata xml
first_row = metadata["metadata"][0]
xml_data = etree.fromstring(first_row.encode("ascii", "ignore"))
first_row_patient_id = xml_data.xpath(self.query.replace("KEYWORD", "PatientID"))[0]
        # we perform the drop ourselves to get the expected result for this key-value pair
expected_result = self._drop({"PatientID": first_row_patient_id})
# ask dicom to drop by tag, giving the tag-value pair
tag_number = xml_data.xpath(self.element_query.replace("KEYWORD", "PatientID"))[0].get("tag")
self.dicom.drop_rows_by_tags({tag_number: first_row_patient_id})
# compare our result to dicom's
self._compare_dicom_with_expected_result(expected_result)
def test_drop_multiple_columns_basic(self):
"""test drop tags with multiple tags"""
# here we will generate our drop
keyword_drop = {}
# we will get the first row and extract the patient id and institution name
metadata = self.dicom.metadata.to_pandas()["metadata"]
first_row = metadata[0]
xml_data = etree.fromstring(first_row.encode("ascii", "ignore"))
first_row_patient_id = xml_data.xpath(self.query.replace("KEYWORD", "PatientID"))[0]
first_row_body_part = xml_data.xpath(self.query.replace("KEYWORD", "BodyPartExamined"))[0]
# get the tag numbers and values
patient_id_tag = xml_data.xpath(self.element_query.replace("KEYWORD", "PatientID"))[0].get("tag")
body_part_tag = xml_data.xpath(self.element_query.replace("KEYWORD", "BodyPartExamined"))[0].get("tag")
keyword_drop["PatientID"] = first_row_patient_id
keyword_drop["BodyPartExamined"] = first_row_body_part
        # we do the dropping ourselves to generate the expected result
matching_records = self._drop(keyword_drop)
# we ask dicom to drop by tag with the tag-value pairs we extracted
self.dicom.drop_rows_by_tags({patient_id_tag: first_row_patient_id, body_part_tag: first_row_body_part})
# finally we ensure dicom's result matches ours
self._compare_dicom_with_expected_result(matching_records)
def test_drop_invalid_column(self):
"""test drop tags with invalid tag name"""
self.dicom.drop_rows_by_tags({"invalid keyword": "value"})
self.assertEqual(self.count, self.dicom.metadata.count())
def test_drop_multiple_invalid_columns(self):
"""test drop tags with mult invalid tag names"""
self.dicom.drop_rows_by_tags({"invalid": "bla", "another_invalid_col": "bla"})
self.assertEqual(self.count, self.dicom.metadata.count())
def test_drop_invalid_valid_col_mix(self):
"""test drop tags with a mix of valid and invalid tags"""
# first we will extract a valid tag number and value from the xml
first_row = self.dicom.metadata.to_pandas()["metadata"][0]
xml_data = etree.fromstring(first_row.encode("ascii", "ignore"))
patient_id = xml_data.xpath(self.query.replace("KEYWORD", "PatientID"))[0]
patient_id_tag = xml_data.xpath(self.element_query.replace("KEYWORD", "PatientID"))[0].get("tag")
# now we ask dicom to drop by tags giving it both the valid tag-value
# pair we extracted and also an invalid tag-value pair
self.dicom.drop_rows_by_tags({patient_id_tag: patient_id, "Invalid": "bla"})
        # since no record matches both criteria, nothing should be dropped
self.assertEqual(self.count, self.dicom.metadata.count())
def test_drop_invalid_type(self):
"""test drop tags with invalid param type"""
with self.assertRaisesRegexp(Exception, "incomplete format"):
self.dicom.drop_rows_by_tags(1)
def _drop(self, keywords):
"""generate our expected result by droping the records"""
# here we are generating the expected result from the key-value
# drop so that we can compare it to what dicom returns
# we will iterate through the dicom metadata to get all of the
# records which match our key-value criteria
matching_metadata = []
matching_pixeldata = []
pandas_metadata = self.dicom.metadata.to_pandas()["metadata"]
pandas_pixeldata = self.dicom.pixeldata.to_pandas()["imagematrix"]
for (metadata, pixeldata) in zip(pandas_metadata, pandas_pixeldata):
ascii_xml = metadata.encode("ascii", "ignore")
xml = etree.fromstring(ascii_xml)
for keyword in keywords:
this_row_keyword_value = xml.xpath(self.query.replace("KEYWORD", keyword))[0]
                if this_row_keyword_value != keywords[keyword]:
                    matching_metadata.append(ascii_xml)
                    matching_pixeldata.append(pixeldata)
                    # a record only has to mismatch one tag to be kept, so
                    # stop here to avoid appending the same record twice
                    break
return {"metadata": matching_metadata, "pixeldata": matching_pixeldata}
def _compare_dicom_with_expected_result(self, expected_result):
"""compare expected result with actual result"""
pandas_metadata = self.dicom.metadata.to_pandas()["metadata"]
pandas_pixeldata = self.dicom.pixeldata.to_pandas()["imagematrix"]
for expected, actual in zip(expected_result["metadata"], pandas_metadata):
actual_ascii = actual.encode("ascii", "ignore")
self.assertEqual(actual_ascii, expected)
for expected, actual in zip(expected_result["pixeldata"], pandas_pixeldata):
numpy.testing.assert_equal(expected, actual)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
IGITUGraz/spore-nest-module | examples/pattern_matching_showcase/python/snn_utils/plotter/backends/mpl.py | 3 | 1721 | import logging
import matplotlib.pyplot as plt
import snn_utils.plotter as plotter
logger = logging.getLogger(__name__)
def configure_matplotlib():
plt.ion() # interactive mode
plt.rcParams['figure.facecolor'] = 'white'
plt.rcParams['axes.facecolor'] = 'white'
plt.switch_backend('TkAgg')
class MatplotlibWindow(plotter.PlotWindow):
def __init__(self, plot_builder, data_source, max_time_window=None, enabled=True):
plotter.PlotWindow.__init__(self, plot_builder, data_source, max_time_window)
self._enabled = enabled
self._layout_on_update = True
self._fig.canvas.mpl_connect('resize_event', self._on_resize)
self._fig.canvas.mpl_connect('button_press_event', self._on_click)
self._update_window_title()
def _create_figure(self):
return plt.figure()
def _draw(self):
self._fig.canvas.draw()
def _update_window_title(self):
self._fig.canvas.set_window_title("Plotter [{}]".format(["disabled", "enabled"][self._enabled]))
def _on_resize(self, resize_event):
self._layout_on_update = True
def _on_click(self, mouse_event):
if mouse_event.button == 3:
            # right mouse button: toggle drawing on/off
self._enabled = not self._enabled
self._update_window_title()
logger.info("Plotter: drawing {}".format(["disabled", "enabled"][self._enabled]))
def draw(self):
self.get_figure().canvas.flush_events()
if self._layout_on_update:
self._layout_on_update = False
self._fig.tight_layout()
if not self._enabled:
self._draw()
if self._enabled:
plotter.PlotWindow.draw(self)
| gpl-2.0 |
h-mayorquin/M2_complexity_thesis | Analysis/regresion_simulation_total.py | 1 | 6874 | from functions import *
from analysis_functions import *
from plot_functions import *
import numpy as np
import cPickle
import matplotlib.pyplot as plt
from store_functions import *
import os
from sklearn.linear_model import LinearRegression # Import the learning algorithm
number_of_cells = 21
for cell_number in xrange(number_of_cells):
print '********************************************'
print 'cell number', cell_number
####################
# Load the data
####################
folder = './data/'
#cell_number = 8
cell = '_cell_' + str(cell_number)
quality = '_3000_21_'
stimuli_type = 'SparseNoise'
#stimuli_type = 'DenseNoise'
file_format = '.pickle'
filename_vm = folder + 'vm' + cell + quality + stimuli_type + file_format
filename_images = folder + 'images'+ quality + stimuli_type + file_format
print 'Stimuli Type', stimuli_type
    # Save figures
    save_figures = False
    # Whether to strip axis ticks/labels from the saved kernel figures
    # (referenced by the plotting block further down).
    remove_axis = True
    f = open(filename_vm, 'rb')
    vm = cPickle.load(f)
    f.close()
    f = open(filename_images, 'rb')
    ims = cPickle.load(f)
ims = ims / 100
ims = ims - 0.5
#ims = ims - 50.0
ims2 = ims**2
Nside = ims.shape[2]
f.close()
    ##############################
    #
    ##############################
    # Scale and size values
dt = 1.0 #milliseconds
dim = 21.0 # milliseconds
dh = 5.0 #milliseconds
kernel_duration = 150 # ms
kernel_size = int(kernel_duration / dh)
# Scale factors
input_to_image = dt / dim # Transforms input to image
kernel_to_input = dh / dt # Transforms kernel to input
image_to_input = dim / dt
    ## Input preprocessing
vm = downsample(vm,dt)
    # Take the data that is going to be used from the total data
#Ntotal = 2 * 10 ** (4) # Number of data to use
Ntotal = vm.size
percentage = 1.0
Ntotal = int(percentage * vm.size)
# Take the minimum between the maximum and the choice
Ntotal = np.min((Ntotal, vm.size))
V = vm[0:int(Ntotal)]
vm = None # Liberate memory
# Size of the training set as a percentage of the data
alpha = 1 # training vs total
Ntraining = int(alpha * Ntotal)
# Construct the set of indexes (training, test, working)
Ntest = 10000
remove_start = int(kernel_size * kernel_to_input) # Number of images in a complete kernel
Ntest = np.min((Ntest, Ntotal - Ntraining)) # Take Ntest more examples to test, or the rest available
working_indexes = np.arange(Ntotal)
working_indexes = working_indexes.astype(int)
training_indexes = np.arange(remove_start, Ntraining)
test_indexes = np.arange(Ntraining,Ntraining + Ntest)
    test_indexes = test_indexes.astype(int)
# Calculate kernel
kernel_times = np.arange(kernel_size)
kernel_times = kernel_times.astype(int) # Make the values indexes
# Delay indexes
delay_indexes = np.floor(kernel_times * kernel_to_input)
delay_indexes = delay_indexes.astype(int)
# Image Indexes
image_indexes = np.zeros(working_indexes.size)
image_indexes[working_indexes] = np.floor(working_indexes * input_to_image)
image_indexes = image_indexes.astype(int)
# Normalize the output
mean = np.mean(V[training_indexes])
V = V - mean
#std = np.std(V)
#V = V / std
#V = V / (np.max(V) - np.min(V))
#V = V / (np.max(np.abs(V)))
########################
# Calculate Regression
########################
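    # What is fitted below, roughly: for every delay tau the membrane
    # potential is regressed (independently of the other delays) onto the
    # stimulus frame at that delay and its element-wise square,
    #
    #     V(t) ~ sum_{x,y} h1(tau, x, y) * s(t - tau, x, y)
    #            + sum_{x,y} h2(tau, x, y) * s(t - tau, x, y)**2
    #
    # Each row of the design matrix X therefore concatenates the flattened
    # frame and its square, and the fitted coefficients are reshaped back
    # into the first- and second-order kernels h1 and h2.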
# Number of parameters
Nparameters = Nside*Nside*2
# Create a vector with the indexes of the elements after the image
extract = np.arange(0, training_indexes.size, int(image_to_input), dtype=int)
training_indexes = training_indexes[extract]
# Initialize the kernels
h1 = np.zeros((kernel_size, Nside, Nside))
h2 = np.zeros((kernel_size, Nside, Nside))
# Targets
Y = V[training_indexes]
# Create the training matrix
X = np.zeros((training_indexes.size, Nparameters))
print 'X shape', X.shape
print 'Y shape', Y.shape
print 'file = ', filename_vm
for tau, delay_index in enumerate(delay_indexes):
# Create matrix X
for i, index in enumerate(training_indexes):
delay = image_indexes[index - delay_index]
f1 = np.reshape(ims[delay, ...], Nside*Nside)
f2 = np.reshape(ims2[delay, ...], Nside*Nside)
X[i, :] = np.concatenate((f1,f2))
# Store matrix X
#store_X(X, tau, filename)
# Making the predictions
predictor = LinearRegression(copy_X=False, fit_intercept=False)
predictor.fit(X, Y)
# Extract the parameters
parameters = predictor.coef_
# Order them as squares
h1_dis = parameters[0:Nparameters / 2]
h2_dis = parameters[Nparameters / 2 :]
# Store them
h1[tau,...] = h1_dis.reshape(Nside,Nside)
h2[tau,...] = h2_dis.reshape(Nside,Nside)
############
# Plotting
############
if save_figures:
symmetric = 0
colorbar = True
closest_square_to_kernel = int(np.sqrt(kernel_size)) ** 2
directory = './figures/'
formating='.pdf'
title = 'data_regresion_h1' + quality + stimuli_type
save_filename = directory + title + formating
plot_mutliplot_bilinear(closest_square_to_kernel, h1, colorbar=colorbar, symmetric=symmetric)
figure = plt.gcf() # get current figure
if remove_axis:
#Remove axis
for i in xrange(closest_square_to_kernel):
figure.get_axes()[i].get_xaxis().set_visible(False)
figure.get_axes()[i].get_yaxis().set_visible(False)
figure.set_size_inches(16, 12)
plt.savefig(save_filename, dpi = 100)
os.system("pdfcrop %s %s" % (save_filename, save_filename))
plt.show()
plot_mutliplot_bilinear(closest_square_to_kernel, h2, colorbar=colorbar, symmetric=symmetric)
title = 'data_regresion_h2' + quality+ stimuli_type
save_filename = directory + title + formating
figure = plt.gcf() # get current figure
if remove_axis:
# Remove axis
for i in xrange(closest_square_to_kernel):
figure.get_axes()[i].get_xaxis().set_visible(False)
figure.get_axes()[i].get_yaxis().set_visible(False)
figure.set_size_inches(16, 12)
plt.savefig(save_filename, dpi = 100)
os.system("pdfcrop %s %s" % (save_filename, save_filename))
plt.show()
###############
# Saving
###############
store_kernel_numpy(kernel_size, h1, h2, cell_number, stimuli_type)
| bsd-2-clause |
villalonreina/dipy | dipy/viz/tests/test_regtools.py | 19 | 1473 | import numpy as np
from dipy.viz import regtools
import numpy.testing as npt
from dipy.align.metrics import SSDMetric
from dipy.align.imwarp import SymmetricDiffeomorphicRegistration
# Conditional import machinery for matplotlib
from dipy.utils.optpkg import optional_package
_, have_matplotlib, _ = optional_package('matplotlib')
@npt.dec.skipif(not have_matplotlib)
def test_plot_2d_diffeomorphic_map():
# Test the regtools plotting interface (lightly).
mv_shape = (11, 12)
moving = np.random.rand(*mv_shape)
st_shape = (13, 14)
static = np.random.rand(*st_shape)
dim = static.ndim
metric = SSDMetric(dim)
level_iters = [200, 100, 50, 25]
sdr = SymmetricDiffeomorphicRegistration(metric,
level_iters,
inv_iter=50)
mapping = sdr.optimize(static, moving)
# Smoke testing of plots
ff = regtools.plot_2d_diffeomorphic_map(mapping, 10)
# Default shape is static shape, moving shape
npt.assert_equal(ff[0].shape, st_shape)
npt.assert_equal(ff[1].shape, mv_shape)
# Can specify shape
ff = regtools.plot_2d_diffeomorphic_map(mapping,
delta = 10,
direct_grid_shape=(7, 8),
inverse_grid_shape=(9, 10))
npt.assert_equal(ff[0].shape, (7, 8))
npt.assert_equal(ff[1].shape, (9, 10))
| bsd-3-clause |
LohithBlaze/scikit-learn | sklearn/utils/tests/test_validation.py | 133 | 18339 | """Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils import as_float_array, check_array, check_symmetric
from sklearn.utils import check_X_y
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from sklearn.utils.validation import (
NotFittedError,
has_fit_parameter,
check_is_fitted,
check_consistent_length,
DataConversionWarning,
)
from sklearn.utils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=100, dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M)
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
check_array(X_inf, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_pandas_dtype_object_conversion():
# test that data-frame like objects with dtype object
# get converted
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object)
X_df = MockDataFrame(X)
assert_equal(check_array(X_df).dtype.kind, "f")
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
# smoke-test against dataframes with column named "dtype"
X_df.dtype = "Hans"
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
def test_check_array_dtype_stability():
# test that lists with ints don't get converted to floats
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert_equal(check_array(X).dtype.kind, "i")
assert_equal(check_array(X, ensure_2d=False).dtype.kind, "i")
def test_check_array_dtype_warning():
X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
X_float64 = np.asarray(X_int_list, dtype=np.float64)
X_float32 = np.asarray(X_int_list, dtype=np.float32)
X_int64 = np.asarray(X_int_list, dtype=np.int64)
X_csr_float64 = sp.csr_matrix(X_float64)
X_csr_float32 = sp.csr_matrix(X_float32)
X_csc_float32 = sp.csc_matrix(X_float32)
X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
y = [0, 0, 1]
integer_data = [X_int64, X_csc_int32]
float64_data = [X_float64, X_csr_float64]
float32_data = [X_float32, X_csr_float32, X_csc_float32]
for X in integer_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_warns(DataConversionWarning, check_array, X,
dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
# Check that the warning message includes the name of the Estimator
X_checked = assert_warns_message(DataConversionWarning,
'SomeEstimator',
check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True,
warn_on_dtype=True,
estimator='SomeEstimator')
assert_equal(X_checked.dtype, np.float64)
X_checked, y_checked = assert_warns_message(
DataConversionWarning, 'KNeighborsClassifier',
check_X_y, X, y, dtype=np.float64, accept_sparse=True,
warn_on_dtype=True, estimator=KNeighborsClassifier())
assert_equal(X_checked.dtype, np.float64)
for X in float64_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=False)
assert_equal(X_checked.dtype, np.float64)
for X in float32_data:
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True)
assert_equal(X_checked.dtype, np.float32)
assert_true(X_checked is X)
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=True)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X)
X_checked = assert_no_warnings(check_array, X_csc_float32,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=False)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X_csc_float32)
assert_equal(X_checked.format, 'csr')
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = "Singleton array array(42) cannot be considered a valid collection."
assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
# But this works if the input data is forced to look like a 2D array with
# one sample and one feature:
X_checked = check_array(42, ensure_2d=True)
assert_array_equal(np.array([[42]]), X_checked)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3, allow_nd=True)
# Simulate a case where a pipeline stage has trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
test_arrays = {'dense': arr_asym,
'dok': sp.dok_matrix(arr_asym),
'csr': sp.csr_matrix(arr_asym),
'csc': sp.csc_matrix(arr_asym),
'coo': sp.coo_matrix(arr_asym),
'lil': sp.lil_matrix(arr_asym),
'bsr': sp.bsr_matrix(arr_asym)}
# check error for bad inputs
assert_raises(ValueError, check_symmetric, arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
assert_warns(UserWarning, check_symmetric, arr)
assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert_equal(output.format, arr_format)
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted():
# Check is ValueError raised when non estimator instance passed
assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
assert_raises(TypeError, check_is_fitted, "SVR", "support_")
ard = ARDRegression()
svr = SVR()
try:
assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
assert_raises(NotFittedError, check_is_fitted, svr, "support_")
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
try:
check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
except ValueError as e:
assert_equal(str(e), "Random message ARDRegression, ARDRegression")
try:
check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
except AttributeError as e:
assert_equal(str(e), "Another message SVR, SVR")
ard.fit(*make_blobs())
svr.fit(*make_blobs())
assert_equal(None, check_is_fitted(ard, "coef_"))
assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
assert_raises_regexp(ValueError, 'inconsistent numbers of samples',
check_consistent_length, [1, 2], [1])
assert_raises_regexp(TypeError, 'got <\w+ \'int\'>',
check_consistent_length, [1, 2], 1)
assert_raises_regexp(TypeError, 'got <\w+ \'object\'>',
check_consistent_length, [1, 2], object())
assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
# Despite ensembles having __len__ they must raise TypeError
assert_raises_regexp(TypeError, 'estimator', check_consistent_length,
[1, 2], RandomForestRegressor())
# XXX: We should have a test with a string, but what is correct behaviour?
| bsd-3-clause |
CopyChat/Plotting | Python/Psl_correct.py | 1 | 14522 | #!/usr/bin/env python
########################################
# to modify the NetCDF files
########################################
#First import the netcdf4 library
from netCDF4 import Dataset # http://code.google.com/p/netcdf4-python/
import numpy as np
import sys,getopt
import math
import datetime as DT
import netcdftime
from netcdftime import utime
from datetime import datetime
from matplotlib.dates import DayLocator, HourLocator, DateFormatter, drange, date2num, num2date
from dateutil.relativedelta import relativedelta
from numpy import arange
import numpy as np
import pylab as pl
import parser
import pandas as pd
from pandas import *
import os
from datetime import timedelta
import Scientific.IO.NetCDF as IO
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.lines as lines
import matplotlib.dates as dates
from matplotlib.dates import YEARLY, DateFormatter, rrulewrapper, RRuleLocator, drange
from mpl_toolkits.basemap import Basemap , addcyclic
from matplotlib.colors import LinearSegmentedColormap
import textwrap
pl.close('all')
#=================================================== get opts input file
def main(argv):
inputfile = ''
outputfile = ''
try:
opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
print 'test.py -i <inputfile> -o <outputfile>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'test.py -i <inputfile> -o <outputfile>'
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
print 'Input file:', inputfile
print 'Output file:', outputfile
if __name__ == "__main__":
main(sys.argv[1:])
#===================================================
GCMvar='psl'
RELYvar='msl'
GCMinputf='psl_6hrPlev_HadGEM2-ES_historical_r1i1p1_198412010600-198512010000.nc'
#GCMinputf='psl_6hrPlev_HadGEM2-ES_historical_r1i1p1_198412010600-198512010000.standard.nc'
RELYinputf='msl_EIN75.198412010000-198512010000.nc.remap.nc.360.nc'
#RELYinputf='msl_EIN75.198412010000-198512010000.nc.remap.nc'
#===================================================
########################### units of time
#===================================================
#=================================================== to read
# Read an existing NetCDF file and create a new one
# f is going to be the existing NetCDF file from where we want to import data
GCMf=Dataset(GCMinputf,'r+') # r+ is for reading and writing
RELYf=Dataset(RELYinputf,'r') # r is for read only
# Extract data from NetCDF file
print GCMf.variables.keys()
print GCMf.dimensions.keys()
GCMvar3D=GCMf.variables[GCMvar][:,:,:]
RELYvar3D=RELYf.variables[RELYvar][:,:,:]
LATITUDE=len(GCMvar3D[0,:,0])
LONGITUDE=len(GCMvar3D[0,0,:])
TIME=len(GCMvar3D[:,0,0])
TIME2=len(RELYvar3D[:,0,0])
#print Latitude,Longitude,Timesize
#=================================================== set up variables to use
GCMvar2D=GCMvar3D.reshape(TIME,-1)
RELYvar2D=RELYvar3D.reshape(TIME2,-1)
# create a 3D variable to hold the Mean bias as GCMvar3D in size.
#MeanBias=GCMvar3D
# NOTE: this method leads to an error: when the second GCMdf is created in the loop
# (t=2), GCMvar3D changes its first-month values to those of MonthlyMeanBias,
# which is really bizarre. So, create it as a 3D zeros array and then reshape it
MeanBias=np.zeros(TIME*LATITUDE*LONGITUDE).reshape(TIME,LATITUDE,LONGITUDE)
print MeanBias.shape
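# Explanatory note on the aliasing pitfall described above (general numpy
# behaviour, not specific to this file): plain assignment such as
# MeanBias = GCMvar3D does not copy the data, it only binds a second name to
# the same underlying array, so later writes to MeanBias[...] would also
# modify GCMvar3D.  Allocating a fresh zeros array (as done here) or using
# GCMvar3D.copy() gives an independent buffer.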
#---------------------------------------------------
# to test whether the reshape is working well or not
print '======== 3D :======='
print RELYvar3D
print '======== 2D :======='
print RELYvar2D
print '======== 2D reshape:======='
RELYvar2DT=RELYvar2D.reshape(TIME2,LATITUDE,LONGITUDE)
print RELYvar2DT
if (RELYvar3D.all()==RELYvar2DT.all()):
print 'OKOKOKOK'
#quit()
#---------------------------------------------------
#quit()
#=================================================== to datetime
GCMtime=netcdftime.num2date(GCMf.variables['time'][:],GCMf.variables['time'].units,calendar='360_day')
#GCMtime=netcdftime.num2date(GCMf.variables['time'][:],GCMf.variables['time'].units)
#print GCMtime[9].year
print type(GCMtime)
#print [str(i) for i in GCMtime[:]]
#GCMindex=[DT.datetime.strptime(t,'%Y-%m-%d %H:%M:%S') for t in [str(i) for i in GCMtime[:]]]
#print GCMindex
#print DT.datetime.strptime('2002-02-30 4:00:09','%Y-%m-%d %H:%M:%S')
# NOTE: this day does not exist in Python
#=================================================== to datetime
# NOTE: when I use the keyword calendar='360_day', it gives
# wrong values for ONLY this netcdf file; GCMtime is quite OK.
#cdftime = utime(RELYf.variables['time'].units,calendar='360_day')
#cdftime = utime(RELYf.variables['time'].units)
#RELYtime=[cdftime.num2date(t) for t in RELYf.variables['time'][:]]
RELYtime=netcdftime.num2date(RELYf.variables['time'][:],RELYf.variables['time'].units,calendar='360_day')
#RELYtime=netcdftime.num2date(RELYf.variables['time'][:],RELYf.variables['time'].units)
#print type(RELYtime)
#RELYindex=[DT.datetime.strptime(t,'%Y-%m-%d %H:%M:%S') for t in [str(i) for i in RELYtime[:]]]
#print type(RELYindex)
#d={'gcm':pd.Series(GCMvar2D,index=GCMtime),'rely':pd.Series(RELYvar2D,index=RELYtime)}
#ddf=pd.DataFrame(d)
# Series should be one dimension
#quit()
#for j in range(10,len(GCMvar3D[0,:,0])):
#=================================================== to DataFrame
#GCMdf=pd.DataFrame({'year':[t.year for t in GCMtime],
#'month':[t.month for t in GCMtime],
#'day':[t.day for t in GCMtime],
#'hour':[t.hour for t in GCMtime],
#'sdfj':GCMf.variables[GCMvar][:,j,:]})
# NOTE: this method is too time consuming, about 7 hours to finish this code
#GCMdf=pd.DataFrame(GCMf.variables[GCMvar][:,0,0],GCMindex)
# NOTE: cannot convert 360_day np.array objects read from netcdf
# to datetime objects
#quit()
#---------------------------------------------------
GCMdf=pd.DataFrame(GCMvar2D)
GCMdf['year']=[t.year for t in GCMtime]
GCMdf['month']=[t.month for t in GCMtime]
GCMdf['day']=[t.day for t in GCMtime]
GCMdf['hour']=[t.hour for t in GCMtime]
#print GCMdf.dtypes
#print GCMdf.loc[0:9,['year','month','day','hour']]
#print 'GCMdf'
#print GCMdf.iloc[0:60,:]
#quit()
#=================================================== to DataFrame
#RELYdf=pd.DataFrame({'year':[t.year for t in RELYtime],
#'month':[t.month for t in RELYtime],
#'day':[t.day for t in RELYtime],
#'hour':[t.hour for t in RELYtime],
#RELYvar:RELYf.variables[RELYvar][:,j,:]})
# NOTE: this method is too time consuming, about 7 hours to finish this code
#RELYdf=pd.DataFrame(RELYf.variables[RELYvar][:,0,0],RELYindex)
# NOTE: cannot convert 360_day np.array objects read from netcdf
# to datetime objects
RELYdf=pd.DataFrame(RELYvar2D,dtype='float32')
RELYdf['year']=[t.year for t in RELYtime]
RELYdf['month']=[t.month for t in RELYtime]
RELYdf['day']=[t.day for t in RELYtime]
RELYdf['hour']=[t.hour for t in RELYtime]
#print 'RELYdf'
#print RELYdf.iloc[2,:]
#print GCMdf.loc[0:9,['year','month','day','hour']]
#quit()
#=================================================== calculate
#print GCMdf.stack(0)
#print RELYdf.asfreq('6H',method='pad',calendar='360_day')
# NOTE: asfreq and stack are not satisfactory for this task;
# the former because of the 360_day calendar.
print "---------"
##=================================================== for test calculation
#print RELYdf.loc[0]
## get monthly msl value
#print RELYdf.loc[0][:]
## get value of psl in the same year & month
#print GCMdf[(GCMdf['year'] == RELYdf['year'][0]) & (GCMdf['month'] == RELYdf['month'][0])][:]
##quit()
## values = value
#print GCMdf.dtypes
#print RELYdf.dtypes
#print RELYdf.iloc[0,:]
#print RELYdf.iloc[0,0:LONGITUDE*LATITUDE].shape #196
##quit()
#print np.array(GCMdf[(GCMdf['year'] == RELYdf['year'][0])
#& (GCMdf['month'] == RELYdf['month'][0])])
#print np.array(GCMdf[(GCMdf['year'] == RELYdf['year'][0])
#& (GCMdf['month'] == RELYdf['month'][0])])[:,0:LONGITUDE*LATITUDE].shape # 119
##quit()
#---------------------------------------------------
##print [t for t in np.array(GCMdf[(GCMdf['year'] == RELYdf['year'][0])
##& (GCMdf['month'] == RELYdf['month'][0])][:])]
#print np.array([np.subtract(t,RELYdf.iloc[0,0:LONGITUDE*LATITUDE])
#for t in np.array(GCMdf[(GCMdf['year'] == RELYdf['year'][0])
#& (GCMdf['month'] == RELYdf['month'][0])])[:,0:LONGITUDE*LATITUDE]])
#print np.array([np.subtract(t,RELYdf.iloc[0,0:LONGITUDE*LATITUDE])
#for t in np.array(GCMdf[(GCMdf['year'] == RELYdf['year'][0])
#& (GCMdf['month'] == RELYdf['month'][0])])[:,0:LONGITUDE*LATITUDE]]).shape
#---------------------------------------------------
#print RELYdf.iloc[1,:LONGITUDE*LATITUDE]
#print GCMdf.iloc[1,:LONGITUDE*LATITUDE]
#quit()
#=================================================== loop in time series:
K=0
for t in RELYdf.index:
#for t in [1,2]:
#print RELYdf.index
MonthlyMeanBias=np.array([np.subtract(x,RELYdf.iloc[t,0:LONGITUDE*LATITUDE])
for x in np.array(GCMdf[
(GCMdf['year'] == RELYdf['year'][t]) &
(GCMdf['month'] == RELYdf['month'][t]) &
(GCMdf['hour'] == RELYdf['hour'][t])
])[:,0:LONGITUDE*LATITUDE]])
#---------------------------------------------------
#print "GCMvar3D2:"
#print [x for x in GCMvar3D[0:30,:]] # right
#print "GCMdf:wrong"
#print GCMdf.iloc[0:60,:] # the first month is wrong
#print GCMdf.values
#--------------------------------------------------- small test:
#print " GCM values in this month =======121"
#print np.array([x for x in np.array(GCMdf[
#(GCMdf['year'] == RELYdf['year'][t]) &
#(GCMdf['month'] == RELYdf['month'][t]) &
#(GCMdf['hour'] == RELYdf['hour'][t])
#])]).shape
#print np.array([x for x in np.array(GCMdf[
#(GCMdf['year'] == RELYdf['year'][t]) &
#(GCMdf['month'] == RELYdf['month'][t]) &
#(GCMdf['hour'] == RELYdf['hour'][t])
#])]).shape
#print " GCM values in this month =======212"
#GCMvalue= np.array([x for x in np.array(GCMdf[
#(GCMdf['year'] == RELYdf['year'][t]) &
#(GCMdf['month'] == RELYdf['month'][t]) &
#(GCMdf['hour'] == RELYdf['hour'][t])
#])])
##])[:,0:LONGITUDE*LATITUDE]])
#print GCMvalue
#print GCMvalue.shape
#---------------------------------------------------
##quit()
#print "RELY values in this month ======="
#print np.array(RELYdf.iloc[t,0:LONGITUDE*LATITUDE])
#print np.array(RELYdf.iloc[t,:])
#print np.array(RELYdf.iloc[t,:]).shape
#print "MonthlyMeanBias ======="
#print MonthlyMeanBias
print MonthlyMeanBias.shape
#quit()
#--------------------------------------------------- end of small test:
L=len(MonthlyMeanBias[:,0])
MeanBias[K:K+L,:]=MonthlyMeanBias.reshape(L,LATITUDE,LONGITUDE)
#print " MeanBias ======="
#print MeanBias[K:K+L,j,:]
print " time = "+str(RELYtime[t])+" t= "+str(t)+", L= "+str(L)+", MeanBias len= "+str(len(MeanBias[K:K+L,0,0]))+" k= " +str(K)+", end= "+str(K+L)
K=K+L
# NOTE: needs to be reset to zeros
#quit()
#=================================================== check the calculation
#NOTE: this examination is running in time and Lat(j) dimensions.
#print " NOTE: examination in Day (in month) and Latitude(j) dimensions."
#dateindex1=np.random.randint(0,L/2)
#lonindex1=np.random.randint(0,LONGITUDE*LATITUDE/2)
#dateindex2=np.random.randint(L/2,L)
#lonindex2=np.random.randint(L/2,LONGITUDE*LATITUDE)
#print "random Day index = " +str(dateindex1)
#print "random lonindex = " +str(lonindex1)
#lonindex1=43
#GCMvalue=np.array(GCMdf[
#(GCMdf['year'] == RELYdf['year'][t]) &
#(GCMdf['month'] == RELYdf['month'][t]) &
#(GCMdf['hour'] == RELYdf['hour'][t])
#])[dateindex1:dateindex2,lonindex1:lonindex2]
#])[:,lonindex1:lonindex1+20]
#print GCMvalue.shape
#MeanBiasValue=np.array([x for x in np.array(MonthlyMeanBias)]
#)[:,lonindex1:lonindex1+20]
#)[dateind,x1:dateindex2,lonindex1:lonindex2]
#print '============='
#print '============= GCM values'
#print GCMvalue[:,lonindex1:lonindex1+20]
#print '============='
#print '============= MonthlyMeanBias'
#print MonthlyMeanBias[:,lonindex1:lonindex1+20]
#print '============='
#print '============='
#print "GCM value - MeanBiasValue = "+str(GCMvalue[:,0:LONGITUDE]-MonthlyMeanBias)
#print "Defaule RELYvalue = "+str(RELYdf.iloc[t,lonindex1:lonindex1+20])
#for x in np.array(MonthlyMeanBias)[:,:]])[np.random.randint(0,L),np.random.randint(0,LONGITUDE*LATITUDE)]
#=================================================== print results
print "========================= GCM data:=========================="
print GCMvar3D
print "========================= Reanalysis Data:=========================="
print RELYvar3D
print "========================= montly Mean Bias:=========================="
print MeanBias
print "========================= Corrected GCM data:=========================="
#=================================================== check before WRITING:
print " GCMvar3D shape = " + str(GCMvar3D.shape)
print " MeanBias shape = " + str(MeanBias.shape)
#=================================================== Writing
GCMf.variables[GCMvar][:,:,:] = MeanBias
GCMf.close()
RELYf.close()
#=================================================== final correction
#=================================================== final correction
# produce the corrected GCM LBC: by MeanBias + future GCM
Futureinputf=('/Users/tang/climate/Bias-Correction/Future/'
'psl_6hrPlev_HadGEM2-ES_historical_r1i1p1_199412010600-199512010000.nc')
Futuref=Dataset(Futureinputf,'r+') # r is for read only
# Extract data from NetCDF file
print Futuref.variables.keys()
print Futuref.dimensions.keys()
FutureLBC=np.add(Futuref.variables[GCMvar][:,:,:], MeanBias)
print " shape of FutureLBC "+str(FutureLBC.shape)
print " starting to write... "
Futuref.variables[GCMvar][:,:,:] = FutureLBC
Futuref.close()
#=================================================== end of writing
#=================================================== delete the in processing file
quit()
| gpl-3.0 |
andim/scipy | tools/refguide_check.py | 29 | 23595 | #!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a Scipy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings. This is different from doctesting [we do not aim to have
scipy docstrings doctestable!], this is just to make sure that code in
docstrings is valid python::
$ python refguide_check.py --check_docs optimize
"""
from __future__ import print_function
import sys
import os
import re
import copy
import inspect
import warnings
import doctest
import tempfile
import io
import docutils.core
from docutils.parsers.rst import directives
import shutil
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
from argparse import ArgumentParser, REMAINDER
import numpy as np
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
# Remove sphinx directives that don't run without Sphinx environment
directives._directives.pop('versionadded', None)
directives._directives.pop('moduleauthor', None)
directives._directives.pop('sectionauthor', None)
directives._directives.pop('codeauthor', None)
directives._directives.pop('toctree', None)
BASE_MODULE = "scipy"
PUBLIC_SUBMODULES = [
'cluster',
'cluster.hierarchy',
'cluster.vq',
'constants',
'fftpack',
'fftpack.convolve',
'integrate',
'interpolate',
'io',
'io.arff',
'io.wavfile',
'linalg',
'linalg.blas',
'linalg.lapack',
'linalg.interpolative',
'misc',
'ndimage',
'odr',
'optimize',
'signal',
'sparse',
'sparse.csgraph',
'sparse.linalg',
'spatial',
'spatial.distance',
'special',
'stats',
'stats.mstats',
]
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
'fftpack.convolve': 'fftpack',
'io.wavfile': 'io',
'io.arff': 'io',
}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
'scipy.stats.kstwobign', # inaccurate cdf or ppf
'scipy.stats.levy_stable',
'scipy.special.sinc', # comes from numpy
'scipy.misc.who', # comes from numpy
])
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
r'scipy\.sparse\.csgraph',
r'scipy\.sparse\.linalg',
r'scipy\.spatial\.distance',
r'scipy\.linalg\.blas\.[sdczi].*',
r'scipy\.linalg\.lapack\.[sdczi].*',
]
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_names(module, names_dict):
# Refguide entries:
#
# - 3 spaces followed by function name, and maybe some spaces, some
# dashes, and an explanation; only function names listed in
# refguide are formatted like this (mostly, there may be some false
# positives)
#
# - special directives, such as data and function
#
# - (scipy.constants only): quoted list
#
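    # For illustration (hypothetical examples, not taken from any specific
    # module docstring), a refguide line like
    #    "   linregress  -- Calculate a regression line"
    # would be caught by the first pattern below, and
    #    ".. function:: quad"
    # by the second one.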
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
def get_all_dict(module):
"""Return a copy of the __all__ dict with irrelevant items removed."""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""Return sets of objects only in __all__, refguide, or completely missing."""
only_all = set()
for name in all_dict:
if name not in names:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing
def is_deprecated(f):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg":None})
except DeprecationWarning:
return True
except:
pass
return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
num_all = len(all_dict)
num_ref = len(names)
output = ""
output += "Non-deprecated objects in __all__: %i\n" % num_all
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
dep_in_ref = set(only_ref).intersection(deprecated)
only_ref = set(only_ref).difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
for name in sorted(deprecated):
output += " " + name + "\n"
if len(only_all) == len(only_ref) == len(missing) == 0:
if dots:
output_dot('.')
return [(None, True, output)]
else:
if len(only_all) > 0:
output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
for name in sorted(only_all):
output += " " + name + "\n"
if len(only_ref) > 0:
output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
for name in sorted(only_ref):
output += " " + name + "\n"
if len(missing) > 0:
output += "ERROR: missing objects::\n\n"
for name in sorted(missing):
output += " " + name + "\n"
if dots:
output_dot('F')
return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
if text is None:
if dots:
output_dot('E')
return False, "ERROR: %s: no documentation" % (name,)
ok_unknown_items = set([
'mod', 'currentmodule', 'autosummary', 'data',
'obj', 'versionadded', 'module', 'class',
'ref', 'func', 'toctree', 'moduleauthor',
'sectionauthor', 'codeauthor', 'eq',
])
# Run through docutils
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ("http://foo", name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(
text, token,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
default_role='emphasis',
link_base='',
resolve_name=resolve,
stylesheet_path='',
raw_enabled=0,
file_insertion_enabled=0,
warning_stream=error_stream))
# Print errors, disregarding unimportant ones
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ""
for error in errors:
lines = error.splitlines()
if not lines:
continue
m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if m.group(1) in ok_unknown_items:
continue
m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
if m:
continue
output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
success = False
if not success:
output += " " + "-"*72 + "\n"
for lineno, line in enumerate(text.splitlines()):
output += " %-4d %s\n" % (lineno+1, line)
output += " " + "-"*72 + "\n\n"
if dots:
output_dot('.' if success else 'F')
return success, output
def output_dot(msg='.', stream=sys.stderr):
stream.write(msg)
stream.flush()
def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Returns: [(name, success_flag, output), ...]
"""
skip_types = (dict, str, unicode, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))
return results
def check_doctests(module, verbose, dots=True, doctest_warnings=False):
"""Check code in docstrings of the module's public symbols.
Returns: list of [(item_name, success_flag, output), ...]
"""
# the namespace to run examples in
ns = {'np': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf,}
# if MPL is available, use display-less backend
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
class DTRunner(doctest.DocTestRunner):
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
if new_line:
out("\n")
self._item_name = None
def report_start(self, out, test, example):
self._checker._source = example.source
return doctest.DocTestRunner.report_start(self, out, test, example)
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
self._report_item_name(out)
return doctest.DocTestRunner.report_unexpected_exception(
self, out, test, example, exc_info)
def report_failure(self, out, test, example, got):
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
vanilla = doctest.OutputChecker()
rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary"}
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', 'ax.axis', 'plt.plot(',
'.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim'}
def __init__(self, parse_namedtuples=True, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip stopwords in source
if any(word in self._source for word in self.stopwords):
return True
# skip random stuff
if any(word in want for word in self.rndm_markers):
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(ns))
a_got = eval(got, dict(ns))
except:
if not self.parse_namedtuples:
return False
# suppose that "want" is a tuple, and "got" is smth like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
try:
num = len(a_want)
regex = ('[\w\d_]+\(' +
', '.join(['[\w\d_]+=(.+)']*num) +
'\)')
grp = re.findall(regex, got.replace('\n', ' '))
if len(grp) > 1: # no more than one for now
return False
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return self.check_output(want, got_again, optionflags)
except Exception:
return False
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
# heterog tuple, eg (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except TypeError:
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
# numpy-comparable objects, strings, and heterogeneous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
# Loop over non-deprecated items
results = []
all_success = True
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
results.append((full_name, False,
"Missing item!\n" +
traceback.format_exc()))
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except:
import traceback
results.append((full_name, False,
"Failed to get doctests!\n" +
traceback.format_exc()))
continue
flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
output = []
success = True
def out(msg):
output.append(msg)
class MyStderr(object):
"""Redirect stderr to the current stdout"""
def write(self, msg):
if doctest_warnings:
sys.stdout.write(msg)
else:
out(msg)
# Run tests, trying to restore global state afterward
old_printoptions = np.get_printoptions()
old_errstate = np.seterr()
old_stderr = sys.stderr
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
sys.stderr = MyStderr()
try:
os.chdir(tmpdir)
# try to ensure random seed is NOT reproducible
np.random.seed(None)
for t in tests:
t.filename = short_path(t.filename, cwd)
fails, successes = runner.run(t, out=out)
if fails > 0:
success = False
all_success = False
if have_matplotlib:
plt.close('all')
finally:
sys.stderr = old_stderr
os.chdir(cwd)
shutil.rmtree(tmpdir)
np.set_printoptions(**old_printoptions)
np.seterr(**old_errstate)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
return results
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=list(PUBLIC_SUBMODULES),
nargs='*', help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true", help="Run also doctests")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
args = parser.parse_args(argv)
modules = []
names_dict = {}
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
module_names = list(args.module_names)
for name in list(module_names):
if name in OTHER_MODULE_DOCS:
name = OTHER_MODULE_DOCS[name]
if name not in module_names:
module_names.append(name)
for submodule_name in module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
module = sys.modules[module_name]
if submodule_name not in OTHER_MODULE_DOCS:
find_names(module, names_dict)
if submodule_name in args.module_names:
modules.append(module)
dots = True
success = True
results = []
print("Running checks for %d modules:" % (len(modules),))
for module in modules:
if dots:
if module is not modules[0]:
sys.stderr.write(' ')
sys.stderr.write(module.__name__ + ' ')
sys.stderr.flush()
all_dict, deprecated, others = get_all_dict(module)
names = names_dict.get(module.__name__, set())
mod_results = []
mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
mod_results += check_rest(module, set(names).difference(deprecated),
dots=dots)
if args.doctests:
mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
for v in mod_results:
assert isinstance(v, tuple), v
results.append((module, mod_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
all_success = True
for module, mod_results in results:
success = all(x[1] for x in mod_results)
all_success = all_success and success
if success and args.verbose == 0:
continue
print("")
print("=" * len(module.__name__))
print(module.__name__)
print("=" * len(module.__name__))
print("")
for name, success, output in mod_results:
if name is None:
if not success or args.verbose >= 1:
print(output.strip())
print("")
elif not success or (args.verbose >= 2 and output.strip()):
print(name)
print("-"*len(name))
print("")
print(output.strip())
print("")
if all_success:
print("\nOK: refguide and doctests checks passed!")
sys.exit(0)
else:
print("\nERROR: refguide or doctests have errors")
sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
| bsd-3-clause |
RuthAngus/chronometer | chronometer/gyro_vs_iso_plot.py | 1 | 7021 | """
Making a plot of gyrochronology ages vs isochronal ages.
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from utils import make_param_dict
from isochrones import StarModel
import teff_bv as tbv
from isochrones.mist import MIST_Isochrone
import corner
import emcee
import time
import h5py
import priors
plotpar = {'axes.labelsize': 18,
'font.size': 10,
'legend.fontsize': 18,
'xtick.labelsize': 18,
'ytick.labelsize': 18,
'text.usetex': True}
plt.rcParams.update(plotpar)
def lnprior(par):
age_prior = np.log(priors.age_prior(par[1]))
feh_prior = np.log(priors.feh_prior(par[2]))
distance_prior = np.log(priors.distance_prior(np.exp(par[3])))
return age_prior + feh_prior + distance_prior
def lnprob(par, mod):
if 0 < par[0] < 100 and 0 < par[1] < 11 and -5 < par[2] < 5 and \
0 < par[3] < 1e10 and 0 <= par[4] < 1:
prob = mod.lnlike(par) + lnprior(par)
if np.isfinite(prob):
return prob
else:
return -np.inf
else:
return -np.inf
def calculate_isochronal_age(param_dict, i, RESULTS_DIR):
"""
Do the MCMC using isochrones.py
"""
mist = MIST_Isochrone()
# Initial values
p = np.zeros(5)
p[0] = 1. # df.mass.values[i]
p[1] = 9. # np.log10(1e9*df.age.values[i])
p[2] = 0. # df.feh.values[i]
p[3] = 100. # 1./(df.tgas_parallax.values[i])*1e3
p[4] = 0. # df.Av.values[i]
if type(param_dict) == dict:
mod = StarModel(mist, **param_dict)
else:
mod = param_dict
# Run emcee
nwalkers, nsteps, ndim, mult = 32, 10000, len(p), 5
p0 = [1e-4*np.random.rand(ndim) + p for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=[mod])
print("burning in...")
start = time.time()
pos, _, _ = sampler.run_mcmc(p0, nsteps)
end = time.time()
print("Time = ", (end - start)/60, "minutes")
print("Predicted run time = ", (end - start)/60 * mult, "minutes")
sampler.reset()
print("production run...")
start = time.time()
sampler.run_mcmc(pos, mult*nsteps)
end = time.time()
print("Time = ", (end - start)/60, "minutes")
flat = np.reshape(sampler.chain, (nwalkers*nsteps*mult, ndim))
# Plot figure
flat[:, 1] = 10**(flat[:, 1])*1e-9
fig = corner.corner(flat, labels=["Mass", "Age", "feh", "distance", "Av"])
fig.savefig(os.path.join(RESULTS_DIR, "{}_corner.png".format(i)))
f = h5py.File(os.path.join(RESULTS_DIR, "{}.h5".format(i)), "w")
data = f.create_dataset("samples", np.shape(flat))
data[:, :] = flat
f.close()
med = np.percentile(flat[:, 1], 50)
lower = np.percentile(flat[:, 1], 16)
upper = np.percentile(flat[:, 1], 84)
logerrm, logerrp = med - lower, upper - med
errp = logerrp/med
errm = logerrm/med
return med, errm, errp, flat[:, 1]
# errp = 10**(logerrp/med)*1e-9
# errm = 10**(logerrm/med)*1e-9
# return (10**med)*1e-9, errm, errp, flat[:, 1]
def calculate_gyrochronal_ages(par, period, bv):
"""
The gyro model.
"""
a, b, c, n = par
return (period / (a*(bv - c)**b))**(1./n) * 1e-3
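# A quick sanity check of the relation above (a sketch, using the same
# coefficients passed in __main__ below): with par = [.7725, .60, .4, .5189],
# a star with prot = 26 days and B-V = 0.65 gives
#   (26 / (0.7725 * 0.25**0.60))**(1/0.5189) * 1e-3  ~  4.4 Gyr,
# i.e. roughly a solar-age star.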
def loop_over_stars(df, par, number, RESULTS_DIR, clobber=False):
"""
Calculate gyro and iso ages for each star.
Return lists of ages and uncertainties.
"""
try:
bvs = df.bv.values
except:
teffs = df.teff.values[:number]
fehs, loggs = df.feh.values[:number], df.logg.values[:number]
bvs = tbv.teff2bv(teffs, loggs, fehs)
periods = df.prot.values[:number]
gyro_age = calculate_gyrochronal_ages(par, periods, bvs[:number])
iso_ages, iso_errm, iso_errp, gyro_ages = [], [], [], []
for i, star in enumerate(df.jmag.values[:number]):
if df.prot.values[i] > 0. and bvs[i] > .4:
print("Calculating iso age for ", i, "of",
len(df.jmag.values[:number]), "...")
# Check whether an age exists already
fn = os.path.join(RESULTS_DIR, "{}.h5".format(i))
if clobber:
param_dict = make_param_dict(df, i)
param_dict = {k: param_dict[k] for k in param_dict if
np.isfinite(param_dict[k]).all()}
age, age_errm, age_errp, samps = \
calculate_isochronal_age(param_dict, i, RESULTS_DIR)
d = pd.DataFrame({"age": [age], "age_errm": [age_errm],
"age_errp": [age_errp]})
d.to_csv(fn)
else:
if os.path.exists(fn):
d = pd.read_csv(fn)
age, age_errm, age_errp = d.age.values, \
d.age_errm.values, d.age_errp.values
else:
param_dict = make_param_dict(df, i)
param_dict = {k: param_dict[k] for k in param_dict if
np.isfinite(param_dict[k]).all()}
age, age_errm, age_errp, samps = \
calculate_isochronal_age(param_dict, i, RESULTS_DIR)
d = pd.DataFrame({"age": [age], "age_errm": [age_errm],
"age_errp": [age_errp]})
d.to_csv(fn)
iso_ages.append(age)
iso_errm.append(age_errm)
iso_errp.append(age_errp)
gyro_ages.append(gyro_age[i])
return iso_ages, iso_errm, iso_errp, gyro_ages
def plot_gyro_age_against_iso_age(iso_ages, iso_errm, iso_errp, gyro_ages,
fn):
# ages = np.array([3.5, 6.5, 1., 10, 4.5])
xs = np.linspace(0, max(iso_ages), 100)
plt.clf()
plt.plot(xs, xs, ls="--")
plt.errorbar(gyro_ages, iso_ages, yerr=([iso_errm, iso_errp]), fmt="k.")
plt.xlabel("$\mathrm{Gyrochronal~age~(Gyr)}$")
plt.ylabel("$\mathrm{Isochronal~age~(Gyr)}$")
plt.subplots_adjust(bottom=0.15)
plt.savefig(os.path.join(RESULTS_DIR, fn))
if __name__ == "__main__":
# Preamble.
DATA_DIR = "/Users/ruthangus/projects/chronometer/chronometer/data"
# RESULTS_DIR = "/Users/ruthangus/projects/chronometer/chronometer/iso_ages"
RESULTS_DIR = "/Users/ruthangus/projects/chronometer/chronometer/"\
"fake_iso_ages"
# df = pd.read_csv(os.path.join(DATA_DIR, "kplr_tgas_periods.csv"))
# df = pd.read_csv(os.path.join(DATA_DIR, "action_data.csv"))
df = pd.read_csv(os.path.join(DATA_DIR, "fake_data.csv"))
par = np.array([.7725, .60, .4, .5189])
iso_ages, iso_errm, iso_errp, gyro_ages = loop_over_stars(df, par, 3,
RESULTS_DIR,
clobber=True)
plot_gyro_age_against_iso_age(iso_ages, iso_errm, iso_errp, gyro_ages,
"fake_data")
# "real_data_more")
| mit |
jeepsterboy/waveletanalysis | wavelet_analy_plot.py | 1 | 15570 | #!/usr/bin/env
"""
Wavelet_analy_plots.py
to be used with
Wavelet_analy.py
For P. Stabeno
Using Anaconda packaged Python
modifications for confidence intervals based on wave_matlab at
http://paos.colorado.edu/research/wavelets/
"""
#Standard packages
import os
#Science packages
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
import matplotlib.ticker as plticker
import numpy as np
from scipy import stats
#Other Packages
import brewer2mpl as b2m #colorbrewer maps
"""----------------------------- plot setup ------------------------------------------"""
#set up color map with brewer2mpl; not mandatory, as matplotlib has the colorbrewer maps, but
# it provides an easy way to limit the number of colors in a map
bmap = b2m.get_map('Blues', 'Sequential', 5, reverse=False)
bmap = bmap.mpl_colors
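# bmap is now a plain list of 5 RGB tuples (scaled 0-1), which is the form
# expected by the colors= argument of plt.contourf in the plots below.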
"""----------------------------- plots ----------------------------------------------"""
def plot_wavetransf(wa, T, S, sig95, time_base, plot_percentile=False):
"""plotting WaveTransform Power with confidence interval contour"""
fig = plt.figure(1)
ax = plt.subplot(1,1,1)
if plot_percentile:
#use following to contour at "percentiles variances" when using non-normalized data to match web output
csf =plt.contourf(T, S, wa.wavelet_power, levels=[ 0, stats.scoreatpercentile(wa.wavelet_power, 25), stats.scoreatpercentile(wa.wavelet_power, 50),
stats.scoreatpercentile(wa.wavelet_power, 75), stats.scoreatpercentile(wa.wavelet_power, 95),
stats.scoreatpercentile(wa.wavelet_power, 100)], colors=bmap)
else:
#use following to contour at "normalized variances" BAMS
csf =plt.contourf(T, S, wa.wavelet_power, levels=[ 0, 1,2,5,10], colors=bmap)
cbar = plt.colorbar(pad=.1, shrink=.5, format='%.4f', extend='both') #move and shrink colorbar
levels = [-99, 1] # values greater than 1 are significant
    plt.contour(T, S, sig95, levels, colors='black', linewidths=5)
ax.set_yscale('log')
ax.grid(True)
# put the ticks at powers of 2 in the scale
ticks = np.unique(2 ** np.floor(np.log2(wa.scales)))[1:]
ax.yaxis.set_ticks(ticks)
ax.yaxis.set_ticklabels(ticks.astype(str))
ax.set_ylim(256, 0.5)
ax.set_ylabel('scales')
# second y scale with equivalent fourier periods to scales
# except with the ticks at the powers of 2
ax_fourier = ax.twinx()
ax_fourier.set_yscale('log')
# match the fourier ticks to the scale ticks
ax_fourier.set_yticks(ticks)
ax_fourier.set_yticklabels(ticks.astype(str))
ax_fourier.set_ylabel('fourier period (%s)' % time_base )
fourier_lim = [wa.fourier_period(i) for i in ax.get_ylim()]
ax_fourier.set_ylim(fourier_lim)
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
#fig.autofmt_xdate()
# shade the region between the edge and coi
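    # (power inside this shaded cone of influence is affected by edge effects of the
    # finite-length series and should be interpreted with caution)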
C, S = wa.coi
ax.fill_between(x=C, y1=S, y2=wa.scales.max(), color='gray', alpha=0.5)
ax.set_xlim(wa.time.min(), wa.time.max())
#plt.show()
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0]*2, DefaultSize[1]) )
return (plt, fig)
def plot_wavetransf_time(x, wa, T, S, sig95, gs, signif_g, time_base, ylabel='Pressure (mb)', plot_percentile=False):
"""plotting contours w/global and timeseries"""
fig = plt.figure(2)
ax = plt.subplot2grid((3, 4), (1, 0), colspan=3, rowspan=2)
# use following with unnormalized data to match web output
if plot_percentile:
#use following to contour at "percentiles variances" when using non-normalized data to match web output
csf =plt.contourf(T, S, wa.wavelet_power, levels=[ 0, stats.scoreatpercentile(wa.wavelet_power, 25), stats.scoreatpercentile(wa.wavelet_power, 50),
stats.scoreatpercentile(wa.wavelet_power, 75), stats.scoreatpercentile(wa.wavelet_power, 95),
stats.scoreatpercentile(wa.wavelet_power, 100)], colors=bmap)
else:
#use following to contour at "normalized variances" BAMS
csf =plt.contourf(T, S, wa.wavelet_power, levels=[ 0, 1,2,5,10], colors=bmap)
levels = [-99, 1] # values greater than 1 are significant
    plt.contour(T, S, sig95, levels, colors='black', linewidths=5)
ax.set_yscale('log')
ax.set_ylabel('Scales')
ax.grid(True)
# put the ticks at powers of 2 in the scale
ticks = np.unique(2 ** np.floor(np.log2(wa.scales)))[1:]
ax.yaxis.set_ticks(ticks)
ax.yaxis.set_ticklabels(ticks.astype(str))
ax.set_ylim(256, 0.5)
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
#fig.autofmt_xdate()
# shade the region between the edge and coi
C, S = wa.coi
ax.fill_between(x=C, y1=S, y2=wa.scales.max(), color='gray', alpha=0.5)
ax.set_xlim(wa.time.min(), wa.time.max())
"""
ax = plt.subplot2grid((3, 4), (0, 0), colspan=3, rowspan=1)
p1 = ax.plot(wa.time,x,'r', wa.time, wa.reconstruction(), 'b')
ax.set_xlim([wa.time.min(), wa.time.max()])
ax.set_xticklabels([])
ax.grid(True)
ax.set_ylabel(ylabel)
ax2= ax.twinx()
p2 = ax2.plot(wa.time,x-wa.reconstruction(), 'k')
ax2.set_xlim([wa.time.min(), wa.time.max()])
ax2.set_yticklabels([])
"""
ax = plt.subplot2grid((3, 4), (1, 3), colspan=1, rowspan=2)
p1 = ax.plot(gs,wa.scales, signif_g, wa.scales, 'k--')
ax.set_yscale('log')
ax.set_xscale('log')
ax.grid(True)
# put the ticks at powers of 2 in the scale
ticks = np.unique(2 ** np.floor(np.log2(wa.scales)))[1:]
ax.yaxis.set_ticks(ticks)
ax.set_yticklabels([])
#ax.yaxis.set_ticklabels(ticks.astype(str))
ax.set_ylim(256, 0.5)
# second y scale with equivalent fourier periods to scales
# except with the ticks at the powers of 2
ax_fourier = ax.twinx()
ax_fourier.set_yscale('log')
# match the fourier ticks to the scale ticks
ax_fourier.set_yticks(ticks)
ax_fourier.set_yticklabels(ticks.astype(str))
ax_fourier.set_ylabel('fourier period (%s)' % time_base )
fourier_lim = [wa.fourier_period(i) for i in ax.get_ylim()]
ax_fourier.set_ylim(fourier_lim)
#plt.show()
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0]*2, DefaultSize[1]) )
return(plt, fig)
def plot_wavetransf_time_zoom(x, wa, T, S, sig95, gs, signif_g, time_base, scalemin=0, scalemax=6, ylabel='Pressure (mb)', plot_percentile=False):
"""plotting contours w/global and timeseries"""
fig = plt.figure(22)
ax = plt.subplot2grid((3, 4), (1, 0), colspan=3, rowspan=2)
# use following with unnormalized data to match web output
if plot_percentile:
#use following to contour at "percentiles variances" when using non-normalized data to match web output
csf =plt.contourf(T, S, wa.wavelet_power, levels=[ 0, stats.scoreatpercentile(wa.wavelet_power, 25), stats.scoreatpercentile(wa.wavelet_power, 50),
stats.scoreatpercentile(wa.wavelet_power, 75), stats.scoreatpercentile(wa.wavelet_power, 95),
stats.scoreatpercentile(wa.wavelet_power, 100)], colors=bmap)
else:
#use following to contour at "normalized variances" BAMS
csf =plt.contourf(T, S, wa.wavelet_power, levels=[ 0, 1,2,5,10], colors=bmap)
levels = [-99, 1] # values greater than 1 are significant
    plt.contour(T, S, sig95, levels, colors='black', linewidths=5)
ax.set_yscale('log')
ax.set_ylabel('Scales')
ax.grid(True)
# put the ticks at powers of 2 in the scale
ticks = np.unique(2 ** np.floor(np.log2(wa.scales)))[1:]
ax.yaxis.set_ticks(ticks)
ax.yaxis.set_ticklabels(ticks.astype(str))
ax.set_ylim(scalemax, scalemin)
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
#fig.autofmt_xdate()
# shade the region between the edge and coi
C, S = wa.coi
ax.fill_between(x=C, y1=S, y2=wa.scales.max(), color='gray', alpha=0.5)
ax.set_xlim(wa.time.min(), wa.time.max())
ax = plt.subplot2grid((3, 4), (1, 3), colspan=1, rowspan=2)
p1 = ax.plot(gs,wa.scales, signif_g, wa.scales, 'k--')
ax.set_yscale('log')
ax.set_xscale('log')
ax.grid(True)
# put the ticks at powers of 2 in the scale
ticks = np.unique(2 ** np.floor(np.log2(wa.scales)))[1:]
ax.yaxis.set_ticks(ticks)
ax.set_yticklabels([])
#ax.yaxis.set_ticklabels(ticks.astype(str))
ax.set_ylim(scalemax, scalemin)
ax.set_xticks([0.00000001, 0.000001, 0.0001, 0.01, 1, 100, 10000, 1000000])
# second y scale with equivalent fourier periods to scales
# except with the ticks at the powers of 2
ax_fourier = ax.twinx()
ax_fourier.set_yscale('log')
# match the fourier ticks to the scale ticks
ax_fourier.set_yticks(ticks)
ax_fourier.set_yticklabels(ticks.astype(str))
ax_fourier.set_ylabel('fourier period (%s)' % time_base )
fourier_lim = [wa.fourier_period(i) for i in ax.get_ylim()]
ax_fourier.set_ylim(fourier_lim)
#plt.show()
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0]*2, DefaultSize[1]) )
return(plt, fig)
def scaleogram(wa):
"""scaleogram"""
fig = plt.figure(3)
ax = plt.subplot(1,1,1)
csf = plt.imshow(wa.wavelet_power,
cmap='jet', aspect='auto', origin='lower', extent=[wa.time.min(),wa.time.max(),wa.scales.min(),wa.scales.max()])
ax.set_ylim(ax.get_ylim()[::-1]) #this reverses the yaxis (i.e. deep at the bottom)
cbar = plt.colorbar()
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
fig.autofmt_xdate()
return(plt, fig)
def fft_power_spec(x, time_base, Fs=1):
"""Plot original data (normalized) FFT power spectral density"""
fig = plt.figure(4)
ax1 = plt.subplot(1,1,1)
ax1.psd(x, NFFT=256, pad_to=None, noverlap=0, Fs=Fs)
ax1.set_ylabel('Power Spectral Density dB/(cycles/' + time_base + ')')
ax1.set_xlabel('Frequency (cycles/' + time_base + ')')
ax1.set_xscale('log')
#ax4.set_ylabel('')
#plt.title('overlap')
return(plt, fig)
def scale_ave_timeseries(scale_ave, time, scales_bin):
"""plotting WaveTransform scale averaged power for selected bins"""
fig = plt.figure(5)
ax1 = plt.subplot(1,1,1)
p1 = ax1.plot(time,scale_ave,'k')
ax1.set_xlim([time.min(), time.max()])
ax1.grid(True)
ax1.set_ylabel('Average power between scales ' + str(scales_bin[0]) + ' and ' + str(scales_bin[1]) + '')
ax1.set_xlabel('Time (UTC)')
ax1.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
#plt.show()
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0]*2, DefaultSize[1]) )
return(plt, fig)
def scale_ave_timeseries2D(scale_ave1, scale_ave2, time1, time2, scales_bin):
"""plotting WaveTransform scale averaged power for selected bins"""
fig = plt.figure(5)
ax1 = plt.subplot(1,1,1)
p1 = ax1.plot(time1,scale_ave1,'k', time2,scale_ave2,'r--')
ax1.set_xlim([time1.min(), time1.max()])
ax1.grid(True)
ax1.set_ylabel('Average power between scales ' + str(scales_bin[0]) + ' and ' + str(scales_bin[1]) + '')
ax1.set_xlabel('Time (UTC)')
ax1.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
#plt.show()
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0]*2, DefaultSize[1]) )
return(plt, fig)
def plot2dvar(data, x, y):
""" For plotting imshow of adcp data """
fig = plt.figure(33)
ax = plt.subplot(1,1,1)
csf = plt.imshow(data.T,
cmap='jet', aspect='auto', origin='lower', extent=[x.min(),x.max(),y.min(),y.max()])
ax.set_ylim(ax.get_ylim()[::-1]) #this reverses the yaxis (i.e. deep at the bottom)
cbar = plt.colorbar()
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
fig.autofmt_xdate()
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0]*2, DefaultSize[1]) )
return(plt, fig)
def timeseries_comp(data1, data2, time, ylabel):
fig = plt.figure(11)
ax = plt.subplot2grid((3, 4), (1, 0), colspan=3, rowspan=2)
p = ax.plot(time,data1)
plt.setp(p,'color', 'k', 'linestyle', '-', 'linewidth', .5)
p= ax.plot(time, data2)
plt.setp(p, 'color', 'r', 'linestyle', '-','linewidth', 2,)
ax.set_xlim([time.min(), time.max()])
ax.grid(True)
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
#fig.autofmt_xdate()
ax.set_ylabel(ylabel)
ax.set_xlabel('Time (UTC)')
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0]*2, DefaultSize[1]) )
return(plt, fig)
def plot_xwt_wavetransf(power, time, wa, T, S, sig95, pangle, time_base, scalemin=0, scalemax=6, ylabel='Pressure (mb)', plot_percentile=False):
"""plotting WaveTransform Power with confidence interval contour and phase vectors"""
fig = plt.figure(10)
ax = plt.subplot(1,1,1)
if plot_percentile:
#use following to contour at "percentiles variances" when using non-normalized data to match web output
csf =plt.contourf(T, S, power, levels=[ 0, stats.scoreatpercentile(power, 25), stats.scoreatpercentile(power, 50),
stats.scoreatpercentile(power, 75), stats.scoreatpercentile(power, 95),
stats.scoreatpercentile(power, 100)], colors=bmap)
else:
#use following to contour at "normalized variances" BAMS
csf =plt.contourf(T, S, power, levels=[ 0, .2,.4,.6,.8,1], colors=bmap)
cbar = plt.colorbar(pad=.1, shrink=.5, format='%.4f', extend='both') #move and shrink colorbar
levels = [-99, 1] # values greater than 1 are significant
plt.contour(T, S, sig95,levels, colors='black', linewidths=1)
ax.set_yscale('log')
ax.grid(True)
# plot phase relationship
arr_dens = [60, 30]
    arr_densx = int(np.round(len(time) / arr_dens[0]))
    arr_densy = int(np.round(len(wa.scales) / arr_dens[1]))
    # guard against a zero stride when the series is shorter than the target arrow density
    if arr_densx == 0:
        arr_densx = 1
    if arr_densy == 0:
        arr_densy = 1
plt.quiver(T[::arr_densy,::arr_densx],S[::arr_densy,::arr_densx],(np.cos(pangle))[::arr_densy,::arr_densx],(np.sin(pangle))[::arr_densy,::arr_densx],\
width=.00125, headwidth=4, headlength=4, alpha=0.6, color='k')
# put the ticks at powers of 2 in the scale
ticks = np.unique(2 ** np.floor(np.log2(wa.scales)))[1:]
ax.yaxis.set_ticks(ticks)
ax.yaxis.set_ticklabels(ticks.astype(str))
ax.set_ylim(scalemax, scalemin)
ax.set_ylabel('scales')
# second y scale with equivalent fourier periods to scales
# except with the ticks at the powers of 2
ax_fourier = ax.twinx()
ax_fourier.set_yscale('log')
# match the fourier ticks to the scale ticks
ax_fourier.set_yticks(ticks)
ax_fourier.set_yticklabels(ticks.astype(str))
ax_fourier.set_ylabel('fourier period (%s)' % time_base )
fourier_lim = [wa.fourier_period(i) for i in ax.get_ylim()]
ax_fourier.set_ylim(fourier_lim)
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
fig.autofmt_xdate()
# shade the region between the edge and coi
C, S = wa.coi
ax.fill_between(x=C, y1=S, y2=wa.scales.max(), color='gray', alpha=0.5)
ax.set_xlim(time.min(), time.max())
#plt.show()
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0]*2, DefaultSize[1]) )
return (plt, fig)
| mit |
kagayakidan/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
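# Example usage (the estimator names are the keys of ESTIMATORS above), e.g.:
#   python bench_20newsgroups.py -e logistic_regression naive_bayes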
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
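        # estimators without a random_state parameter (e.g. MultinomialNB) are left as-is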
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
MikeLing/shogun | examples/undocumented/python/graphical/interactive_gp_demo.py | 5 | 14167 | #
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Written (C) 2012 Heiko Strathmann, based on interactive_svm_demo by Christian
# Widmer which itself is based on PyQT Demo by Eli Bendersky
#
"""
Shogun Gaussian processes demo based on interactive SVM demo by Christian \
Widmer and Soeren Sonnenburg which itself is based on PyQT Demo by Eli Bendersky
Work to be done on parameter (e.g. kernel width) optimization.
Heiko Strathmann/Cameron Lai
License: GPLv3
"""
import sys, os, csv
import scipy as SP
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from numpy import *
import matplotlib
from matplotlib.colorbar import make_axes, Colorbar
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from shogun import *
import util
class Form(QMainWindow):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.setWindowTitle('SHOGUN interactive demo')
self.series_list_model = QStandardItemModel()
self.create_menu()
self.create_main_frame()
self.create_status_bar()
self.create_toy_data()
self.on_show()
def on_show(self):
self.axes.clear()
self.axes.plot(self.x, self.y, 'ro')
self.axes.set_xlim((self.xmin,self.xmax))
self.axes.set_ylim((self.ymin,self.ymax))
self.axes.grid(True)
self.canvas.draw()
self.fill_series_list(self.get_stats())
def on_about(self):
msg = __doc__
QMessageBox.about(self, "About the demo", msg.strip())
def fill_series_list(self, names):
self.series_list_model.clear()
for name in names:
item = QStandardItem(name)
item.setCheckState(Qt.Unchecked)
item.setCheckable(False)
self.series_list_model.appendRow(item)
def onclick(self, event):
print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(event.button, event.x, event.y, event.xdata, event.ydata)
x=SP.append(self.x, event.xdata)
self.y=SP.append(self.y, event.ydata)
self.x= x[:,SP.newaxis]
self.on_show()
self.status_text.setText("New data point: x=%f, y=%f"%(event.xdata, event.ydata))
def create_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
#load_action = self.create_action("&Load file",
# shortcut="Ctrl+L", slot=self.load_file, tip="Load a file")
quit_action = self.create_action("&Quit", slot=self.close,
shortcut="Ctrl+Q", tip="Close the application")
#self.add_actions(self.file_menu,
# (load_action, None, quit_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About",
shortcut='F1', slot=self.on_about,
tip='About the demo')
self.add_actions(self.help_menu, (about_action,))
def clear_data(self):
self.x=SP.array([])
self.y=SP.array([])
self.xmin=-5
self.xmax=5
self.ymin=-5
self.ymax=5
self.on_show()
self.status_text.setText("Data cleared")
def enable_widgets(self):
kernel_name = self.kernel_combo.currentText()
if kernel_name == "Linear":
self.sigma.setDisabled(True)
self.degree.setDisabled(True)
elif kernel_name == "Polynomial":
self.sigma.setDisabled(True)
self.degree.setEnabled(True)
elif kernel_name == "Gaussian":
self.sigma.setEnabled(True)
self.degree.setDisabled(True)
def get_stats(self):
num_train = len(self.x)
str_train = "num training points: %i" % num_train
str_test = "num training points: %s" % self.nTest.text()
return (str_train, str_test)
def create_toy_data(self):
#0. generate Toy-Data; just samples from a superposition of a sin + linear trend
x = SP.arange(self.xmin,self.xmax,(self.xmax-self.xmin)/100.0)
C = 2 #offset
b = 0
y = b*x + C + float(self.sine_amplitude.text())*SP.sin(float(self.sine_freq.text())*x)
# dy = b + 1*SP.cos(x)
y += float(self.noise_level.text())*random.randn(y.shape[0])
self.y=y-y.mean()
self.x= x[:,SP.newaxis]
self.on_show()
def learn_kernel_width(self):
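        # NOTE: this method is not wired into the GUI (its button is commented out in
        # create_main_frame) and refers to names such as `inf`, `likelihood`, `SECF`,
        # `gp`, `feat_train` and `labels` that are local to plot_gp rather than
        # attributes of self; read it as a sketch of how GradientModelSelection could
        # be used to tune the kernel width rather than as working code.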
root=ModelSelectionParameters();
c1=ModelSelectionParameters("inference_method", inf);
root.append_child(c1);
c2 = ModelSelectionParameters("scale");
c1.append_child(c2);
c2.build_values(0.01, 4.0, R_LINEAR);
c3 = ModelSelectionParameters("likelihood_model", likelihood);
c1.append_child(c3);
c4=ModelSelectionParameters("sigma");
c3.append_child(c4);
c4.build_values(0.001, 4.0, R_LINEAR);
c5 =ModelSelectionParameters("kernel", SECF);
c1.append_child(c5);
c6 =ModelSelectionParameters("width");
c5.append_child(c6);
c6.build_values(0.001, 4.0, R_LINEAR);
crit = GradientCriterion();
grad=GradientEvaluation(gp, feat_train, labels, crit);
grad.set_function(inf);
gp.print_modsel_params();
root.print_tree();
grad_search=GradientModelSelection(root, grad);
grad.set_autolock(0);
best_combination=grad_search.select_model(1);
self.sigma.setText("1.0")
self.plot_gp()
def plot_gp(self):
feat_train = RealFeatures(self.x.T)
labels = RegressionLabels(self.y)
#[x,y]=self.data.get_data()
#feat_train=RealFeatures(x.T)
#labels=RegressionLabels(y)
n_dimensions = 1
kernel_name = self.kernel_combo.currentText()
print "current kernel is %s" % (kernel_name)
#new interface with likelihood parametres being decoupled from the covaraince function
likelihood = GaussianLikelihood()
#covar_parms = SP.log([2])
#hyperparams = {'covar':covar_parms,'lik':SP.log([1])}
# construct covariance function
width=float(self.sigma.text())
degree=int(self.degree.text())
if kernel_name == "Linear":
gk = LinearKernel(feat_train, feat_train)
gk.set_normalizer(IdentityKernelNormalizer())
elif kernel_name == "Polynomial":
gk = PolyKernel(feat_train, feat_train, degree, True)
gk.set_normalizer(IdentityKernelNormalizer())
elif kernel_name == "Gaussian":
gk = GaussianKernel(feat_train, feat_train, width)
#SECF = GaussianKernel(feat_train, feat_train, width)
#covar = SECF
zmean = ZeroMean();
inf = ExactInferenceMethod(gk, feat_train, zmean, labels, likelihood);
inf.get_negative_marginal_likelihood()
# location of unispaced predictions
        x_test = array([linspace(self.xmin, self.xmax, int(self.nTest.text()))])
feat_test=RealFeatures(x_test)
gp = GaussianProcessRegression(inf)
gp.train()
covariance = gp.get_variance_vector(feat_test)
predictions = gp.get_mean_vector(feat_test)
#print "x_test"
#print feat_test.get_feature_matrix()
#print "mean predictions"
#print predictions.get_labels()
#print "covariances"
#print covariance.get_labels()
self.status_text.setText("Negative Log Marginal Likelihood = %f"%(inf.get_negative_marginal_likelihood()))
self.axes.clear()
self.axes.grid(True)
self.axes.set_xlim((self.xmin,self.xmax))
self.axes.set_ylim((self.ymin,self.ymax))
self.axes.hold(True)
x_test=feat_test.get_feature_matrix()[0]
self.axes.plot(x_test, predictions, 'b-x')
#self.axes.plot(x_test, labels.get_labels(), 'ro')
self.axes.plot(self.x, self.y, 'ro')
#self.axes.plot(feat_test.get_feature_matrix()[0], predictions.get_labels()-3*sqrt(covariance.get_labels()))
#self.axes.plot(feat_test.get_feature_matrix()[0], predictions.get_labels()+3*sqrt(covariance.get_labels()))
upper = predictions+3*sqrt(covariance)
lower = predictions-3*sqrt(covariance)
self.axes.fill_between(x_test, lower, upper, color='grey')
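        # the grey band spans the predictive mean +/- 3 standard deviations
        # (~99.7% of the mass of the Gaussian predictive distribution)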
self.axes.hold(False)
self.canvas.draw()
self.fill_series_list(self.get_stats())
def create_main_frame(self):
self.xmin=-5
self.xmax=5
self.ymin=-5
self.ymax=5
self.main_frame = QWidget()
plot_frame = QWidget()
self.dpi = 100
self.fig = Figure((6.0, 6.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
cid = self.canvas.mpl_connect('button_press_event', self.onclick)
self.axes = self.fig.add_subplot(111)
self.cax = None
#self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
self.kernel_combo = QComboBox()
self.kernel_combo.insertItem(-1, "Gaussian")
self.kernel_combo.insertItem(-1, "Polynomial")
self.kernel_combo.insertItem(-1, "Linear")
self.kernel_combo.maximumSize = QSize(300, 50)
self.connect(self.kernel_combo, SIGNAL("currentIndexChanged(QString)"), self.enable_widgets)
log_label = QLabel("Data points")
self.series_list_view = QListView()
self.series_list_view.setModel(self.series_list_model)
self.sine_freq = QLineEdit()
self.sine_freq.setText("1.0")
self.sine_amplitude = QLineEdit()
self.sine_amplitude.setText("1.0")
self.sigma = QLineEdit()
self.sigma.setText("1.2")
self.degree = QLineEdit()
self.degree.setText("2")
self.noise_level = QLineEdit()
self.noise_level.setText("1")
self.nTest = QLineEdit()
self.nTest.setText("100")
spins_hbox = QHBoxLayout()
spins_hbox.addWidget(QLabel('Sine data setting: '))
spins_hbox.addWidget(QLabel('Sine Freq.'))
spins_hbox.addWidget(self.sine_freq)
spins_hbox.addWidget(QLabel('Sine Amplitude'))
spins_hbox.addWidget(self.sine_amplitude)
spins_hbox.addWidget(QLabel('Noise Level'))
spins_hbox.addWidget(self.noise_level)
spins_hbox.addStretch(1)
spins_hbox2 = QHBoxLayout()
spins_hbox2.addWidget(QLabel('Kernel Setting: '))
spins_hbox2.addWidget(QLabel('Type'))
spins_hbox2.addWidget(self.kernel_combo)
spins_hbox2.addWidget(QLabel("Width"))
spins_hbox2.addWidget(self.sigma)
spins_hbox2.addWidget(QLabel("Degree"))
spins_hbox2.addWidget(self.degree)
spins_hbox2.addStretch(1)
spins_hbox3 = QHBoxLayout()
spins_hbox3.addWidget(QLabel('Test Setting: '))
spins_hbox3.addWidget(QLabel('Number of test points'))
spins_hbox3.addWidget(self.nTest)
spins_hbox3.addStretch(1)
self.show_button = QPushButton("&Train GP")
self.connect(self.show_button, SIGNAL('clicked()'), self.plot_gp)
self.gen_sine_data_button = QPushButton("&Generate Sine Data")
self.connect(self.gen_sine_data_button, SIGNAL('clicked()'), self.create_toy_data)
self.clear_data_button = QPushButton("&Clear")
self.connect(self.clear_data_button, SIGNAL('clicked()'), self.clear_data)
self.learn_kernel_button = QPushButton("&Learn Kernel Width and train GP")
self.connect(self.learn_kernel_button, SIGNAL('clicked()'), self.learn_kernel_width)
left_vbox = QVBoxLayout()
left_vbox.addWidget(self.canvas)
#left_vbox.addWidget(self.mpl_toolbar)
right0_vbox = QVBoxLayout()
right0_vbox.addWidget(QLabel("Data Points"))
right0_vbox.addWidget(self.series_list_view)
#right0_vbox.addWidget(self.legend_cb)
right0_vbox.addStretch(1)
right2_vbox = QVBoxLayout()
right2_vbox.addWidget(QLabel("Settings"))
right2_vbox.addWidget(self.gen_sine_data_button)
right2_vbox.addWidget(self.clear_data_button)
right2_vbox.addWidget(self.show_button)
#right2_vbox.addWidget(self.learn_kernel_button)
right2_vbox.addLayout(spins_hbox)
right2_vbox.addLayout(spins_hbox2)
right2_vbox.addLayout(spins_hbox3)
right2_vbox.addStretch(1)
right_vbox = QHBoxLayout()
right_vbox.addLayout(right0_vbox)
right_vbox.addLayout(right2_vbox)
hbox = QVBoxLayout()
hbox.addLayout(left_vbox)
hbox.addLayout(right_vbox)
self.main_frame.setLayout(hbox)
self.setCentralWidget(self.main_frame)
self.enable_widgets()
def create_status_bar(self):
self.status_text = QLabel("")
self.statusBar().addWidget(self.status_text, 1)
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action( self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
def main():
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
if __name__ == "__main__":
main()
| gpl-3.0 |
bsipocz/astropy | astropy/visualization/tests/test_norm.py | 1 | 8374 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy import ma
from numpy.testing import assert_allclose
from astropy.visualization.mpl_normalize import ImageNormalize, simple_norm, imshow_norm
from astropy.visualization.interval import ManualInterval
from astropy.visualization.stretch import SqrtStretch
try:
import matplotlib # pylint: disable=W0611
from matplotlib import pyplot as plt
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
DATA = np.linspace(0., 15., 6)
DATA2 = np.arange(3)
DATA2SCL = 0.5 * DATA2
@pytest.mark.skipif('HAS_MATPLOTLIB')
def test_normalize_error_message():
with pytest.raises(ImportError) as exc:
ImageNormalize()
assert (exc.value.args[0] == "matplotlib is required in order to use "
"this class.")
@pytest.mark.skipif('not HAS_MATPLOTLIB')
class TestNormalize:
def test_invalid_interval(self):
with pytest.raises(TypeError):
ImageNormalize(vmin=2., vmax=10., interval=ManualInterval,
clip=True)
def test_invalid_stretch(self):
with pytest.raises(TypeError):
ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch,
clip=True)
def test_scalar(self):
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=True)
norm2 = ImageNormalize(data=6, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=True)
assert_allclose(norm(6), 0.70710678)
assert_allclose(norm(6), norm2(6))
def test_clip(self):
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=True)
norm2 = ImageNormalize(DATA, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=True)
output = norm(DATA)
expected = [0., 0.35355339, 0.70710678, 0.93541435, 1., 1.]
assert_allclose(output, expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(output, norm2(DATA))
def test_noclip(self):
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=False)
norm2 = ImageNormalize(DATA, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=False)
output = norm(DATA)
expected = [np.nan, 0.35355339, 0.70710678, 0.93541435, 1.11803399,
1.27475488]
assert_allclose(output, expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(norm.inverse(norm(DATA))[1:], DATA[1:])
assert_allclose(output, norm2(DATA))
def test_implicit_autoscale(self):
norm = ImageNormalize(vmin=None, vmax=10., stretch=SqrtStretch(),
clip=False)
norm2 = ImageNormalize(DATA, interval=ManualInterval(None, 10),
stretch=SqrtStretch(), clip=False)
output = norm(DATA)
assert norm.vmin == np.min(DATA)
assert norm.vmax == 10.
assert_allclose(output, norm2(DATA))
norm = ImageNormalize(vmin=2., vmax=None, stretch=SqrtStretch(),
clip=False)
norm2 = ImageNormalize(DATA, interval=ManualInterval(2, None),
stretch=SqrtStretch(), clip=False)
output = norm(DATA)
assert norm.vmin == 2.
assert norm.vmax == np.max(DATA)
assert_allclose(output, norm2(DATA))
def test_masked_clip(self):
mdata = ma.array(DATA, mask=[0, 0, 1, 0, 0, 0])
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=True)
norm2 = ImageNormalize(mdata, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=True)
output = norm(mdata)
expected = [0., 0.35355339, 1., 0.93541435, 1., 1.]
assert_allclose(output.filled(-10), expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(output, norm2(mdata))
def test_masked_noclip(self):
mdata = ma.array(DATA, mask=[0, 0, 1, 0, 0, 0])
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=False)
norm2 = ImageNormalize(mdata, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=False)
output = norm(mdata)
expected = [np.nan, 0.35355339, -10, 0.93541435, 1.11803399,
1.27475488]
assert_allclose(output.filled(-10), expected)
assert_allclose(output.mask, [0, 0, 1, 0, 0, 0])
assert_allclose(norm.inverse(norm(DATA))[1:], DATA[1:])
assert_allclose(output, norm2(mdata))
@pytest.mark.skipif('not HAS_MATPLOTLIB')
class TestImageScaling:
def test_linear(self):
"""Test linear scaling."""
norm = simple_norm(DATA2, stretch='linear')
assert_allclose(norm(DATA2), DATA2SCL, atol=0, rtol=1.e-5)
def test_sqrt(self):
"""Test sqrt scaling."""
norm = simple_norm(DATA2, stretch='sqrt')
assert_allclose(norm(DATA2), np.sqrt(DATA2SCL), atol=0, rtol=1.e-5)
def test_power(self):
"""Test power scaling."""
power = 3.0
norm = simple_norm(DATA2, stretch='power', power=power)
assert_allclose(norm(DATA2), DATA2SCL ** power, atol=0, rtol=1.e-5)
def test_log(self):
"""Test log10 scaling."""
norm = simple_norm(DATA2, stretch='log')
ref = np.log10(1000 * DATA2SCL + 1.0) / np.log10(1001.0)
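        # same formula as test_log_with_log_a below, evaluated at the default log_a=1000;
        # the base of the logarithm cancels in the ratio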
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_log_with_log_a(self):
"""Test log10 scaling with a custom log_a."""
log_a = 100
norm = simple_norm(DATA2, stretch='log', log_a=log_a)
ref = np.log10(log_a * DATA2SCL + 1.0) / np.log10(log_a + 1)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_asinh(self):
"""Test asinh scaling."""
norm = simple_norm(DATA2, stretch='asinh')
ref = np.arcsinh(10 * DATA2SCL) / np.arcsinh(10)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_asinh_with_asinh_a(self):
"""Test asinh scaling with a custom asinh_a."""
asinh_a = 0.5
norm = simple_norm(DATA2, stretch='asinh', asinh_a=asinh_a)
ref = np.arcsinh(DATA2SCL / asinh_a) / np.arcsinh(1. / asinh_a)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_min(self):
"""Test linear scaling."""
norm = simple_norm(DATA2, stretch='linear', min_cut=1.)
assert_allclose(norm(DATA2), [0., 0., 1.], atol=0, rtol=1.e-5)
def test_percent(self):
"""Test percent keywords."""
norm = simple_norm(DATA2, stretch='linear', percent=99.)
assert_allclose(norm(DATA2), DATA2SCL, atol=0, rtol=1.e-5)
norm2 = simple_norm(DATA2, stretch='linear', min_percent=0.5,
max_percent=99.5)
assert_allclose(norm(DATA2), norm2(DATA2), atol=0, rtol=1.e-5)
def test_invalid_stretch(self):
"""Test invalid stretch keyword."""
with pytest.raises(ValueError):
simple_norm(DATA2, stretch='invalid')
@pytest.mark.skipif('not HAS_MATPLOTLIB')
def test_imshow_norm():
image = np.random.randn(10, 10)
ax = plt.subplot()
imshow_norm(image, ax=ax)
with pytest.raises(ValueError):
# X and data are the same, can't give both
imshow_norm(image, X=image, ax=ax)
with pytest.raises(ValueError):
# illegal to manually pass in normalization since that defeats the point
imshow_norm(image, ax=ax, norm=ImageNormalize())
imshow_norm(image, ax=ax, vmin=0, vmax=1)
# vmin/vmax "shadow" the MPL versions, so imshow_only_kwargs allows direct-setting
imshow_norm(image, ax=ax, imshow_only_kwargs=dict(vmin=0, vmax=1))
# but it should fail for an argument that is not in ImageNormalize
with pytest.raises(ValueError):
imshow_norm(image, ax=ax, imshow_only_kwargs=dict(cmap='jet'))
# make sure the pyplot version works
imres, norm = imshow_norm(image, ax=None)
assert isinstance(norm, ImageNormalize)
| bsd-3-clause |
clemenshage/grslra | experiments/6_grslra/forecasting_airline/plot_forecasting_airline_new.py | 1 | 2160 | from matplotlib import pyplot as plt
import matplotlib
from scipy.io import loadmat
import numpy as np
matplotlib.rcParams.update({'font.size': 24})
matplotlib.rcParams.update({'text.usetex': True})
data_grslra = np.load('predictions_new.npz')
data_slrabyf = np.load('predictions_new_slrabyf.npz')
data_arima = loadmat('arima/predictions_new_arima.mat')
x = data_grslra["x"]
l_grslra = data_grslra["l_grslra"]
m = data_grslra["m"]
N = data_grslra["N"]
N_f = data_grslra["N_f"]
t_grslra = data_grslra["t"]
l_slrabyf = data_slrabyf["l_slrabyf"]
t_slrabyf = data_slrabyf["t_slrabyf"]
l_arima = data_arima["x_hat"].flatten()
t_arima = data_arima["t"].flatten()
plt.figure(figsize=(15,5))
plt.hold(True)
plt.plot(x, label="data", linewidth=3, color='black',zorder=0)
plt.plot(l_grslra, label="GRSLRA", linewidth=2, color='g', zorder=3)
plt.plot(l_slrabyf, label="SLRAbyF", linewidth=2, color='r', zorder=1)
plt.plot(l_arima, label="ARIMA", linewidth=2, color='b', zorder=2)
# startpoint=60
startpoint = 2*m-1 + N_f
norm_data = np.linalg.norm(x[startpoint: N])
err_grslra = np.linalg.norm(x[startpoint : N] - l_grslra[startpoint : N]) / norm_data
err_slrabyf = np.linalg.norm(x[startpoint : N] - l_slrabyf[startpoint : N]) / norm_data
err_arima = np.linalg.norm(x[startpoint : N] - l_arima[startpoint : N]) / norm_data
avtime_grslra = np.mean(t_grslra[startpoint : N])
avtime_slrabyf = np.mean(t_slrabyf[startpoint : N])
avtime_arima = np.mean(t_arima[startpoint : N])
plt.xticks(np.arange(0,x.size, step=24), np.arange(1996,2015,2))
plt.yticks(np.arange(0,1e5, step=2e4), np.arange(0,10,2))
plt.ylabel("$\\times 10^8$ monthly passengers")
axes = plt.gca()
axes.set_ylim([0, 90000])
axes.set_xlim(2*m - 1, N + 1)
plt.grid(b=True, which='both', color='0.65',linestyle='-')
plt.tight_layout()
plt.legend()
print "\nNew data set\n"
print "GRSLRA:\n"
print "relative error: ", err_grslra, "average time: ", avtime_grslra, "\n"
print "SLRAbyF:\n"
print "relative error: ", err_slrabyf, "average time: ", avtime_slrabyf, "\n"
print "ARIMA:\n"
print "relative error: ", err_arima, "average time: ", avtime_arima, "\n"
plt.savefig('airline_new.pdf', dpi=200) | mit |
bundgus/python-playground | matplotlib-playground/examples/pylab_examples/toggle_images.py | 1 | 1371 | #!/usr/bin/env python
""" toggle between two images by pressing "t"
The basic idea is to load two images (they can be different shapes) and plot
them to the same axes with hold "on". Then, toggle the visible property of
them using keypress event handling
If you want two images with different shapes to be plotted with the same
extent, they must have the same "extent" property
As usual, we'll define some random images for demo. Real data is much more
exciting!
Note, on the wx backend on some platforms (e.g., linux), you have to
first click on the figure before the keypress events are activated.
If you know how to fix this, please email us!
"""
import matplotlib.pyplot as plt
import numpy as np
# two images x1 is initially visible, x2 is not
x1 = np.random.random((100, 100))
x2 = np.random.random((150, 175))
# arbitrary extent - both images must have same extent if you want
# them to be resampled into the same axes space
extent = (0, 1, 0, 1)
im1 = plt.imshow(x1, extent=extent)
im2 = plt.imshow(x2, extent=extent, hold=True)
im2.set_visible(False)
def toggle_images(event):
'toggle the visible state of the two images'
if event.key != 't':
return
b1 = im1.get_visible()
b2 = im2.get_visible()
im1.set_visible(not b1)
im2.set_visible(not b2)
plt.draw()
plt.connect('key_press_event', toggle_images)
plt.show()
| mit |
evgchz/scikit-learn | examples/linear_model/plot_robust_fit.py | 238 | 2414 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The mean squared error on new, non-corrupt data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- Theil-Sen is good for small outliers, both in direction X and y, but has
a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that it X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
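# every third sample is turned into an outlier: magnitude 3 for the "small deviants"
# scenarios and 10 for the "large deviants" ones, injected either in y or in X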
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling errors only', X, y),
('Corrupt X, small deviants', X_errors, y),
('Corrupt y, small deviants', X, y_errors),
('Corrupt X, large deviants', X_errors_large, y),
('Corrupt y, large deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'k+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot,
label='%s: error = %.3f' % (name, mse))
plt.legend(loc='best', frameon=False,
               title='Error: mean squared error\n to non-corrupt data')
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
xavierwu/scikit-learn | examples/feature_stacker.py | 246 | 1906 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
| bsd-3-clause |
ningchi/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
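# consensus_score equals 1.0 when the recovered biclusters match the planted ones exactly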
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
asurve/systemml | scripts/perftest/python/google_docs/stats.py | 15 | 3540 | #!/usr/bin/env python3
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
import argparse
import os
import pprint
from os.path import join
import matplotlib.pyplot as plt
from gdocs_utils import auth
# Dict
# {algo_name : [algo_1.0': t1, 'algo_2.0': t2]}
def get_formatted_data(sheet_data):
"""
    Read all the data from Google Sheets and transform it into a dictionary that can be
    used for plotting later
"""
algo_dict = {}
for i in sheet_data:
inn_count = 0
data = []
for key, val in i.items():
inn_count += 1
if inn_count < 3:
data.append(key)
data.append(val)
if inn_count == 2:
t1, v1, _, v2 = data
if len(str(v2)) > 0:
if v1 not in algo_dict:
algo_dict[v1] = [{t1: v2}]
else:
algo_dict[v1].append({t1: v2})
inn_count = 0
data = []
return algo_dict
def plot(x, y, xlab, ylab, title):
"""
Save plots to the current folder based on the arguments
"""
CWD = os.getcwd()
PATH = join(CWD, title)
width = .35
plt.bar(x, y, color="red", width=width)
plt.xticks(x)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.title(title)
plt.savefig(PATH + '.png')
print('Plot {} generated'.format(title))
return plt
# Example Usage
# ./stats.py --auth ../key/client_json.json --exec-mode singlenode
if __name__ == '__main__':
execution_mode = ['hybrid_spark', 'singlenode']
cparser = argparse.ArgumentParser(description='System-ML Statistics Script')
cparser.add_argument('--auth', help='Location to read auth file',
required=True, metavar='')
cparser.add_argument('--exec-type', help='Execution mode', choices=execution_mode,
required=True, metavar='')
cparser.add_argument('--plot', help='Algorithm to plot', metavar='')
args = cparser.parse_args()
sheet = auth(args.auth, args.exec_type)
all_data = sheet.get_all_records()
plot_data = get_formatted_data(all_data)
if args.plot is not None:
print(plot_data[args.plot])
title = args.plot
ylab = 'Time in sec'
xlab = 'Version'
x = []
y = []
for i in plot_data[args.plot]:
version = list(i.keys())[0]
time = list(i.values())[0]
y.append(time)
x.append(version)
x = list(map(lambda x: float(x.split('_')[1]), x))
plot(x, y, xlab, ylab, title)
else:
pprint.pprint(plot_data, width=1) | apache-2.0 |
jrbourbeau/cr-composition | processing/legacy/anisotropy/ks_test/save_pvals.py | 1 | 2267 | #!/usr/bin/env python
import os
import argparse
import numpy as np
import healpy as hp
from scipy.stats import ks_2samp
import pandas as pd
import comptools as comp
import comptools.anisotropy.anisotropy as anisotropy
if __name__ == "__main__":
p = argparse.ArgumentParser(
        description='Compares projected relative intensity distributions from two samples of anisotropy maps with a KS test and saves the resulting p-values')
p.add_argument('--infiles_sample_0', dest='infiles_sample_0', nargs='*',
                   help='Input map files for sample 0')
p.add_argument('--infiles_sample_1', dest='infiles_sample_1', nargs='*',
                   help='Input map files for sample 1')
p.add_argument('--outfile', dest='outfile',
help='Output DataFrame file')
p.add_argument('--overwrite', dest='overwrite',
default=False, action='store_true',
                   help='Option to overwrite the output file, '
                        'if it already exists')
args = p.parse_args()
if args.infiles_sample_0 is None or args.infiles_sample_1 is None:
raise ValueError('Input files must be specified')
elif len(args.infiles_sample_0) != len(args.infiles_sample_1):
raise ValueError('Both samples of input files must be the same length')
if args.outfile is None:
raise ValueError('Outfile must be specified')
else:
comp.check_output_dir(args.outfile)
data_dict = {'ks_statistic': [], 'pval': []}
# Read in all the input maps
kwargs_relint = {'smooth': 20, 'scale': None, 'decmax': -55}
for file_0, file_1 in zip(args.infiles_sample_0, args.infiles_sample_1):
relint_0 = anisotropy.get_map(files=file_0, name='relint', **kwargs_relint)
relint_1 = anisotropy.get_map(files=file_1, name='relint', **kwargs_relint)
ri_0, ra, ra_err = anisotropy.get_proj_relint(relint_0, n_bins=24)
ri_1, ra, ra_err = anisotropy.get_proj_relint(relint_1, n_bins=24)
ks_statistic, pval = ks_2samp(ri_0, ri_1)
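        # two-sample KS test comparing the projected relative-intensity distributions;
        # a small p-value suggests the two samples do not share a common underlying distribution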
data_dict['ks_statistic'].append(ks_statistic)
data_dict['pval'].append(pval)
with pd.HDFStore(args.outfile) as output_store:
dataframe = pd.DataFrame(data_dict)
output_store.put('dataframe', dataframe, format='table', data_columns=True)
| mit |
eickenberg/scikit-learn | sklearn/feature_selection/tests/test_base.py | 170 | 3666 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
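# For reference: with 10 input features and the default step=2, StepSelector's mask keeps
# columns 0, 2, 4, 6, 8 -- exactly what `support` and `support_inds` below encode.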
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform(feature_names)
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform(feature_names_t)
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
vancan1ty/SEAT | demos/sliderdemo.py | 1 | 1236 | #http://matplotlib.org/examples/widgets/slider_demo.html
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
fig, ax = plt.subplots()
plt.subplots_adjust(left=0.25, bottom=0.25)
t = np.arange(0.0, 1.0, 0.001)
a0 = 5
f0 = 3
s = a0*np.sin(2*np.pi*f0*t)
l, = plt.plot(t,s, lw=2, color='red')
plt.axis([0, 1, -10, 10])
axcolor = 'lightgoldenrodyellow'
axfreq = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axamp = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
sfreq = Slider(axfreq, 'Freq', 0.1, 30.0, valinit=f0)
samp = Slider(axamp, 'Amp', 0.1, 10.0, valinit=a0)
def update(val):
amp = samp.val
freq = sfreq.val
l.set_ydata(amp*np.sin(2*np.pi*freq*t))
fig.canvas.draw_idle()
sfreq.on_changed(update)
samp.on_changed(update)
resetax = plt.axes([0.8, 0.025, 0.1, 0.04])
button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')
def reset(event):
sfreq.reset()
samp.reset()
button.on_clicked(reset)
rax = plt.axes([0.025, 0.5, 0.15, 0.15], axisbg=axcolor)
radio = RadioButtons(rax, ('red', 'blue', 'green'), active=0)
def colorfunc(label):
l.set_color(label)
fig.canvas.draw_idle()
radio.on_clicked(colorfunc)
plt.show()
| gpl-3.0 |
dparks1134/STAMP | stamp/plugins/groups/plots/HeatmapPlot.py | 1 | 21704 | #=======================================================================
# Author: Donovan Parks
#
# Heatmap for two groups.
#
# Copyright 2013 Donovan Parks
#
# This file is part of STAMP.
#
# STAMP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# STAMP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with STAMP. If not, see <http://www.gnu.org/licenses/>.
#=======================================================================
import sys
from PyQt4 import QtGui, QtCore
from stamp.plugins.groups.AbstractGroupPlotPlugin import AbstractGroupPlotPlugin, TestWindow, ConfigureDialog
from stamp.plugins.groups.plots.configGUI.HeatmapPlotUI import Ui_HeatmapPlotDialog
from stamp.plugins.PlotEventHandler import PlotEventHandler
from matplotlib import pylab
import matplotlib as mpl
from matplotlib.patches import Rectangle
import numpy
import scipy.cluster.hierarchy as cluster
import scipy.spatial.distance as dist
class HeatmapPlot(AbstractGroupPlotPlugin):
'''
Heatmap plot.
'''
def __init__(self, preferences, parent=None):
AbstractGroupPlotPlugin.__init__(self, preferences, parent)
self.discreteColourMap = mpl.colors.ListedColormap([(141 / 255.0, 211 / 255.0, 199 / 255.0), (255 / 255.0, 255 / 255.0, 179 / 255.0), \
(190 / 255.0, 186 / 255.0, 218 / 255.0), (251 / 255.0, 128 / 255.0, 114 / 255.0), \
(128 / 255.0, 177 / 255.0, 211 / 255.0), (253 / 255.0, 180 / 255.0, 98 / 255.0), \
(179 / 255.0, 222 / 255.0, 105 / 255.0), (252 / 255.0, 205 / 255.0, 229 / 255.0), \
(217 / 255.0, 217 / 255.0, 217 / 255.0), (188 / 255.0, 128 / 255.0, 189 / 255.0), \
(204 / 255.0, 235 / 255.0, 197 / 255.0), (255 / 255.0, 237 / 255.0, 111 / 255.0)])
self.preferences = preferences
self.name = 'Heatmap plot'
self.type = 'Exploratory'
self.bPlotFeaturesIndividually = False
self.settings = preferences['Settings']
self.fieldToPlot = self.settings.value('group: ' + self.name + '/field to plot', 'Proportion of sequences (%)').toString()
self.bPlotOnlyActiveFeatures = self.settings.value('group: ' + self.name + '/plot only active features', False).toBool()
self.figWidth = self.settings.value('group: ' + self.name + '/width', 7.0).toDouble()[0]
self.figHeight = self.settings.value('group: ' + self.name + '/height', 7.0).toDouble()[0]
self.sortColMethod = self.settings.value('group: ' + self.name + '/sort col method', 'Average neighbour (UPGMA)').toString()
self.sortRowMethod = self.settings.value('group: ' + self.name + '/sort row method', 'Average neighbour (UPGMA)').toString()
self.bShowColDendrogram = self.settings.value('group: ' + self.name + '/show col dendrogram', True).toBool()
self.bShowRowDendrogram = self.settings.value('group: ' + self.name + '/show row dendrogram', True).toBool()
self.colourmap = self.settings.value('group: ' + self.name + '/colourmap', 'Blues').toString()
self.legendPos = self.settings.value('group: ' + self.name + '/legend position', 3).toInt()[0]
self.clusteringColThreshold = self.settings.value('group: ' + self.name + '/clustering col threshold', 0.75).toDouble()[0]
self.clusteringRowThreshold = self.settings.value('group: ' + self.name + '/clustering row threshold', 0.75).toDouble()[0]
self.dendrogramHeight = self.settings.value('group: ' + self.name + '/dendrogram col height', 1.5).toDouble()[0]
self.dendrogramWidth = self.settings.value('group: ' + self.name + '/dendrogram row width', 1.5).toDouble()[0]
def mirrorProperties(self, plotToCopy):
super(HeatmapPlot, self).mirrorProperties(plotToCopy)
self.bPlotFeaturesIndividually = False
self.name = plotToCopy.name
self.fieldToPlot = plotToCopy.fieldToPlot
self.bPlotOnlyActiveFeatures = plotToCopy.bPlotOnlyActiveFeatures
self.figWidth = plotToCopy.figWidth
self.figHeight = plotToCopy.figHeight
self.sortColMethod = plotToCopy.sortColMethod
self.sortRowMethod = plotToCopy.sortRowMethod
self.bShowColDendrogram = plotToCopy.bShowColDendrogram
self.bShowRowDendrogram = plotToCopy.bShowRowDendrogram
self.colourmap = plotToCopy.colourmap
self.legendPos = plotToCopy.legendPos
self.clusteringColThreshold = plotToCopy.clusteringColThreshold
self.clusteringRowThreshold = plotToCopy.clusteringRowThreshold
self.dendrogramHeight = plotToCopy.dendrogramHeight
self.dendrogramWidth = plotToCopy.dendrogramWidth
def plotDendrogram(self, matrix, axis, dendrogramMethod, clusteringThreshold, orientation, bPlot):
d = dist.pdist(matrix)
if dendrogramMethod == 'Average neighbour (UPGMA)':
linkage = cluster.linkage(dist.squareform(d), method='average')
elif dendrogramMethod == 'Centroid':
linkage = cluster.linkage(dist.squareform(d), method='centroid')
elif dendrogramMethod == 'Nearest neighbour':
linkage = cluster.linkage(dist.squareform(d), method='single')
elif dendrogramMethod == 'Furthest neighbour':
linkage = cluster.linkage(dist.squareform(d), method='complete')
elif dendrogramMethod == 'Ward':
linkage = cluster.linkage(dist.squareform(d), method='ward')
dendrogram = cluster.dendrogram(linkage, orientation=orientation, link_color_func=lambda k: 'k', ax=axis, no_plot=not bPlot)
index = cluster.fcluster(linkage, clusteringThreshold * max(linkage[:, 2]), 'distance')
axis.set_xticks([])
axis.set_yticks([])
return index, dendrogram['leaves']
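    # Standalone sketch of the clustering calls used above (illustrative only,
    # not part of STAMP; the toy matrix is made up):
    #   import numpy, scipy.spatial.distance as dist, scipy.cluster.hierarchy as cluster
    #   m = numpy.random.rand(6, 4)                                   # 6 features x 4 samples
    #   linkage = cluster.linkage(dist.squareform(dist.pdist(m)), method='average')
    #   leaves = cluster.dendrogram(linkage, no_plot=True)['leaves']  # plotting order
    #   groups = cluster.fcluster(linkage, 0.75 * max(linkage[:, 2]), 'distance')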
def plot(self, profile, statsResults):
# determine features to plot
featuresToPlot = profile.profileDict.keys()
if self.bPlotOnlyActiveFeatures:
featuresToPlot = statsResults.activeFeatures
if len(featuresToPlot) <= 1 or (len(profile.samplesInGroup1) + len(profile.samplesInGroup2)) <= 1:
self.emptyAxis()
return
elif len(featuresToPlot) > 1000 or len(profile.samplesInGroup1) + len(profile.samplesInGroup2) > 1000:
QtGui.QApplication.instance().setOverrideCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
QtGui.QMessageBox.information(self, 'Too much data!', 'Heatmap plots are limited to 1000 samples and 1000 features.', QtGui.QMessageBox.Ok)
QtGui.QApplication.instance().restoreOverrideCursor()
self.emptyAxis()
return
# *** Colour of plot elements
group1Colour = str(self.preferences['Group colours'][profile.groupName1].name())
group2Colour = str(self.preferences['Group colours'][profile.groupName2].name())
# *** Colour map for category dendrogram on left
if self.colourmap == "Blues":
self.matrixColourmap = pylab.cm.Blues
elif self.colourmap == "Blue to red to green":
self.matrixColourmap = pylab.cm.brg
elif self.colourmap == "Blue to white to red":
self.matrixColourmap = pylab.cm.bwr
elif self.colourmap == "Cool to warm":
self.matrixColourmap = pylab.cm.cool
elif self.colourmap == "Grayscale":
self.matrixColourmap = pylab.cm.gist_yarg
elif self.colourmap == "Jet":
self.matrixColourmap = pylab.cm.jet
elif self.colourmap == "Orange to red":
self.matrixColourmap = pylab.cm.OrRd
elif self.colourmap == "Paired":
self.matrixColourmap = pylab.cm.Paired
elif self.colourmap == "Purple to green":
self.matrixColourmap = pylab.cm.PRGn
elif self.colourmap == "Reds":
self.matrixColourmap = pylab.cm.Reds
elif self.colourmap == "Red to blue":
self.matrixColourmap = pylab.cm.RdBu
elif self.colourmap == "Red to yellow to blue":
self.matrixColourmap = pylab.cm.RdYlBu
elif self.colourmap == "Spectral":
self.matrixColourmap = pylab.cm.spectral
elif self.colourmap == "Yellow to orange to red":
self.matrixColourmap = pylab.cm.YlOrRd
# *** Get data for each group
if self.fieldToPlot == "Number of sequences":
data1, data2 = profile.getActiveFeatureCounts(featuresToPlot)
else: # Proportion of sequences (%)
data1, data2 = profile.getActiveFeatureProportions(featuresToPlot)
matrix = []
for row in data1:
matrix.append(row)
for r in xrange(0, len(data2)):
matrix[r] += data2[r]
matrix = numpy.array(matrix)
# *** Get heatmap data
colHeaders = profile.samplesInGroup1 + profile.samplesInGroup2
rowHeaders = featuresToPlot
# *** Find longest label
bTruncate = False
if self.preferences['Truncate feature names']:
length = self.preferences['Length of truncated feature names']
bTruncate = True
longestLabelLen = 0
longestRowLabel = ''
for i in xrange(0, len(rowHeaders)):
if bTruncate and len(rowHeaders[i]) > length + 3:
rowHeaders[i] = rowHeaders[i][0:length] + '...'
if len(rowHeaders[i]) > longestLabelLen:
longestLabelLen = len(rowHeaders[i])
longestRowLabel = rowHeaders[i]
longestLabelLen = 0
longestColLabel = ''
for i in xrange(0, len(colHeaders)):
if bTruncate and len(colHeaders[i]) > length + 3:
colHeaders[i] = colHeaders[i][0:length] + '...'
if len(colHeaders[i]) > longestLabelLen:
longestLabelLen = len(colHeaders[i])
longestColLabel = colHeaders[i]
# *** Check sorting method and adjust dendrogram parameters appropriately
if self.sortRowMethod == 'Alphabetical order' or self.sortRowMethod == 'Mean abundance':
self.bShowRowDendrogram = False
if self.sortColMethod == 'Alphabetical order' or self.sortColMethod == 'Mean abundance':
self.bShowColDendrogram = False
# *** Set figure size
self.fig.clear()
self.fig.set_size_inches(self.figWidth, self.figHeight)
xLabelBounds, yLabelBounds = self.labelExtents([longestColLabel], 8, 90, [longestRowLabel], 8, 0)
# position all figure elements
colourBarWidthX = 0.2 / self.figWidth
colourBarWidthY = 0.2 / self.figHeight
marginX = 0.1 / self.figWidth
marginY = 0.1 / self.figHeight
if self.bShowRowDendrogram:
dendrogramWidth = self.dendrogramWidth / self.figWidth
else:
dendrogramWidth = 0.2 / self.figWidth
if self.bShowColDendrogram:
dendrogramHeight = self.dendrogramHeight / self.figHeight
else:
dendrogramHeight = 0.2 / self.figHeight
cellSizeX = max((1.0 - 2 * 0.02 - dendrogramWidth - colourBarWidthX - 2 * marginX - yLabelBounds.width), 0.01) * self.figWidth / len(colHeaders)
cellSizeY = max((1.0 - 2 * 0.02 - dendrogramHeight - colourBarWidthY - 2 * marginY - xLabelBounds.height), 0.01) * self.figHeight / len(rowHeaders)
cellSize = min(cellSizeX, cellSizeY)
cellSizeXPer = cellSize / self.figWidth
cellSizeYPer = cellSize / self.figHeight
paddingX = 0.5 * (1.0 - dendrogramWidth - 2 * marginX - colourBarWidthX - cellSizeXPer * len(colHeaders) - yLabelBounds.width)
paddingY = 0.5 * (1.0 - dendrogramHeight - 2 * marginY - colourBarWidthY - cellSizeYPer * len(rowHeaders) - xLabelBounds.height)
rowDendrogramX = paddingX
rowDendrogramY = paddingY + (xLabelBounds.height)
rowDendrogramW = dendrogramWidth
rowDendrogramH = cellSizeYPer * len(rowHeaders)
rowClusterBarX = rowDendrogramX + rowDendrogramW + marginX
rowClusterBarY = rowDendrogramY
rowClusterBarW = colourBarWidthX
rowClusterBarH = rowDendrogramH
colDendrogramX = rowClusterBarX + rowClusterBarW + marginX
colDendrogramY = rowDendrogramY + rowDendrogramH + marginY + colourBarWidthY + marginY
colDendrogramW = cellSizeXPer * len(colHeaders)
colDendrogramH = dendrogramHeight
colClusterBarX = colDendrogramX
colClusterBarY = rowDendrogramY + rowDendrogramH + marginY
colClusterBarW = colDendrogramW
colClusterBarH = colourBarWidthY
heatmapX = rowClusterBarX + rowClusterBarW + marginX
heatmapY = rowDendrogramY
heatmapW = colDendrogramW
heatmapH = rowDendrogramH
legendHeight = 0.2 / self.figHeight
legendW = min(0.8 * yLabelBounds.width, 1.25 / self.figWidth)
legendH = legendHeight
legendX = heatmapX + heatmapW + 0.2 / self.figWidth
legendY = 1.0 - legendHeight - (2 * yLabelBounds.height) - marginY
if not self.bShowColDendrogram:
# move legend to side
legendX = heatmapX + 0.5 * (heatmapW - legendW)
legendY = heatmapY + heatmapH + (1.5 * yLabelBounds.height) + 0.1 / self.figWidth
# plot dendrograms
if self.sortRowMethod == 'Alphabetical order':
leafIndex1 = numpy.argsort(rowHeaders)[::-1]
elif self.sortRowMethod == 'Mean abundance':
leafIndex1 = numpy.argsort(numpy.mean(matrix, axis=1))
else:
axisRowDendrogram = self.fig.add_axes([rowDendrogramX, rowDendrogramY, rowDendrogramW, rowDendrogramH], frame_on=False)
ind1, leafIndex1 = self.plotDendrogram(matrix, axisRowDendrogram, self.sortRowMethod, self.clusteringRowThreshold, 'right', bPlot=self.bShowRowDendrogram)
if self.sortColMethod == 'Alphabetical order':
leafIndex2 = numpy.argsort(colHeaders)
elif self.sortColMethod == 'Mean abundance':
leafIndex2 = numpy.argsort(numpy.mean(matrix, axis=0))
else:
axisColDendrogram = self.fig.add_axes([colDendrogramX, colDendrogramY, colDendrogramW, colDendrogramH], frame_on=False)
ind2, leafIndex2 = self.plotDendrogram(matrix.T, axisColDendrogram, self.sortColMethod, self.clusteringColThreshold, 'top', bPlot=self.bShowColDendrogram)
# *** Handle mouse events
xCell = []
yCell = []
tooltips = []
for x in xrange(0, len(colHeaders)):
for y in xrange(0, len(rowHeaders)):
xCell.append(x)
yCell.append(y)
tooltip = rowHeaders[leafIndex1[y]] + ', ' + colHeaders[leafIndex2[x]] + '\n'
if self.fieldToPlot == "Number of sequences":
tooltip += '%d' % (matrix[leafIndex1[y]][leafIndex2[x]])
else:
tooltip += '%.3f' % (matrix[leafIndex1[y]][leafIndex2[x]]) + '%'
tooltips.append(tooltip)
self.plotEventHandler = PlotEventHandler(xCell, yCell, tooltips, 0.4, 0.4)
self.mouseEventCallback(self.plotEventHandler)
# plot column clustering bars
sampleColourMap = []
for i in leafIndex2:
if colHeaders[i] in profile.samplesInGroup1:
sampleColourMap.append(group1Colour)
else:
sampleColourMap.append(group2Colour)
sampleColourMap = mpl.colors.ListedColormap(sampleColourMap)
matrix = matrix[:, leafIndex2]
if self.bShowColDendrogram:
ind2 = ind2[leafIndex2]
axc = self.fig.add_axes([colClusterBarX, colClusterBarY, colClusterBarW, colClusterBarH]) # axes for column side colorbar
dc = numpy.array(numpy.arange(len(leafIndex2)), dtype=int)
dc.shape = (1, len(leafIndex2))
axc.matshow(dc, aspect='auto', origin='lower', cmap=sampleColourMap)
axc.set_xticks([])
axc.set_yticks([])
# plot row clustering bars
matrix = matrix[leafIndex1, :]
if self.bShowRowDendrogram:
ind1 = ind1[leafIndex1]
axr = self.fig.add_axes([rowClusterBarX, rowClusterBarY, rowClusterBarW, rowClusterBarH])
dr = numpy.array(ind1, dtype=int)
dr.shape = (len(ind1), 1)
axr.matshow(dr, aspect='auto', origin='lower', cmap=self.discreteColourMap)
axr.set_xticks([])
axr.set_yticks([])
# determine scale for colour map
minValue = 1e6
maxValue = 0
for row in matrix:
minValue = min(minValue, min(row))
maxValue = max(maxValue, max(row))
norm = mpl.colors.Normalize(minValue, maxValue)
# plot heatmap
axisHeatmap = self.fig.add_axes([heatmapX, heatmapY, heatmapW, heatmapH])
axisHeatmap.matshow(matrix, origin='lower', cmap=self.matrixColourmap, norm=norm)
axisHeatmap.set_xticks([])
axisHeatmap.set_yticks([])
# row and column labels
labelOffset = 0.5 * (yLabelBounds.height / cellSizeYPer)
for i in xrange(0, len(rowHeaders)):
axisHeatmap.text(matrix.shape[1] - 0.5, i - labelOffset, ' ' + rowHeaders[leafIndex1[i]], horizontalalignment="left")
labelOffset = 0.5 * (xLabelBounds.width / cellSizeXPer)
for i in xrange(0, len(colHeaders)):
axisHeatmap.text(i - labelOffset, -0.5, ' ' + colHeaders[leafIndex2[i]], rotation='vertical', verticalalignment="top")
# plot colour map legend
axisColourMap = self.fig.add_axes([legendX, legendY, legendW, legendH], frame_on=False) # axes for colorbar
colourBar = mpl.colorbar.ColorbarBase(axisColourMap, cmap=self.matrixColourmap, norm=norm, orientation='horizontal')
if self.fieldToPlot == "Number of sequences":
axisColourMap.set_title("# sequences")
else:
axisColourMap.set_title("abundance (%)")
colourBar.set_ticks([minValue, 0.5 * (maxValue - minValue) + minValue, maxValue])
colourBar.set_ticklabels(['%.1f' % minValue, '%.1f' % (0.5 * (maxValue - minValue) + minValue), '%.1f' % maxValue])
# plot column and row lines
for i in xrange(0, len(rowHeaders)):
axisHeatmap.plot([-0.5, len(colHeaders) - 0.5], [i - 0.5, i - 0.5], color='white', linestyle='-', linewidth=1.5)
for i in xrange(0, len(colHeaders)):
axisHeatmap.plot([i - 0.5, i - 0.5], [-0.5, len(rowHeaders) - 0.5], color='white', linestyle='-', linewidth=1.5)
# plot legend
if self.legendPos != -1:
legend1 = Rectangle((0, 0), 1, 1, fc=group1Colour)
legend2 = Rectangle((0, 0), 1, 1, fc=group2Colour)
legend = self.fig.legend([legend1, legend2], (profile.groupName1, profile.groupName2), loc=self.legendPos, ncol=1)
legend.get_frame().set_linewidth(0)
self.updateGeometry()
self.draw()
def configure(self, profile, statsResults):
configDlg = ConfigureDialog(Ui_HeatmapPlotDialog)
configDlg.ui.cboFieldToPlot.setCurrentIndex(configDlg.ui.cboFieldToPlot.findText(self.fieldToPlot))
configDlg.ui.chkPlotOnlyActiveFeatures.setChecked(self.bPlotOnlyActiveFeatures)
configDlg.ui.spinFigWidth.setValue(self.figWidth)
configDlg.ui.spinFigHeight.setValue(self.figHeight)
configDlg.ui.cboColSortMethod.setCurrentIndex(configDlg.ui.cboColSortMethod.findText(self.sortColMethod))
configDlg.ui.cboRowSortMethod.setCurrentIndex(configDlg.ui.cboRowSortMethod.findText(self.sortRowMethod))
configDlg.ui.chkShowColDendrogram.setChecked(self.bShowColDendrogram)
configDlg.ui.chkShowRowDendrogram.setChecked(self.bShowRowDendrogram)
configDlg.ui.cboColourMap.setCurrentIndex(configDlg.ui.cboColourMap.findText(self.colourmap))
# legend position
if self.legendPos == 1:
configDlg.ui.radioLegendPosUpperRight.setChecked(True)
elif self.legendPos == 4:
configDlg.ui.radioLegendPosLowerRight.setChecked(True)
elif self.legendPos == 2:
configDlg.ui.radioLegendPosUpperLeft.setChecked(True)
elif self.legendPos == 3:
configDlg.ui.radioLegendPosLowerLeft.setChecked(True)
else:
configDlg.ui.radioLegendPosNone.setChecked(True)
configDlg.ui.spinColClusteringThreshold.setValue(self.clusteringColThreshold)
configDlg.ui.spinRowClusteringThreshold.setValue(self.clusteringRowThreshold)
configDlg.ui.spinDendrogramColHeight.setValue(self.dendrogramHeight)
configDlg.ui.spinDendrogramRowWidth.setValue(self.dendrogramWidth)
if configDlg.exec_() == QtGui.QDialog.Accepted:
self.fieldToPlot = str(configDlg.ui.cboFieldToPlot.currentText())
self.bPlotOnlyActiveFeatures = configDlg.ui.chkPlotOnlyActiveFeatures.isChecked()
self.figWidth = configDlg.ui.spinFigWidth.value()
self.figHeight = configDlg.ui.spinFigHeight.value()
self.sortColMethod = str(configDlg.ui.cboColSortMethod.currentText())
self.sortRowMethod = str(configDlg.ui.cboRowSortMethod.currentText())
self.bShowColDendrogram = configDlg.ui.chkShowColDendrogram.isChecked()
self.bShowRowDendrogram = configDlg.ui.chkShowRowDendrogram.isChecked()
self.colourmap = str(configDlg.ui.cboColourMap.currentText())
# legend position
if configDlg.ui.radioLegendPosUpperRight.isChecked() == True:
self.legendPos = 1
elif configDlg.ui.radioLegendPosLowerRight.isChecked() == True:
self.legendPos = 4
elif configDlg.ui.radioLegendPosUpperLeft.isChecked() == True:
self.legendPos = 2
elif configDlg.ui.radioLegendPosLowerLeft.isChecked() == True:
self.legendPos = 3
else:
self.legendPos = -1
self.clusteringColThreshold = configDlg.ui.spinColClusteringThreshold.value()
self.clusteringRowThreshold = configDlg.ui.spinRowClusteringThreshold.value()
self.dendrogramHeight = configDlg.ui.spinDendrogramColHeight.value()
self.dendrogramWidth = configDlg.ui.spinDendrogramRowWidth.value()
self.settings.setValue('group: ' + self.name + '/field to plot', self.fieldToPlot)
self.settings.setValue('group: ' + self.name + '/plot only active features', self.bPlotOnlyActiveFeatures)
self.settings.setValue('group: ' + self.name + '/width', self.figWidth)
self.settings.setValue('group: ' + self.name + '/height', self.figHeight)
self.settings.setValue('group: ' + self.name + '/sort col method', self.sortColMethod)
self.settings.setValue('group: ' + self.name + '/sort row method', self.sortRowMethod)
self.settings.setValue('group: ' + self.name + '/show col dendrogram', self.bShowColDendrogram)
self.settings.setValue('group: ' + self.name + '/show row dendrogram', self.bShowRowDendrogram)
self.settings.setValue('group: ' + self.name + '/colourmap', self.colourmap)
self.settings.setValue('group: ' + self.name + '/legend position', self.legendPos)
self.settings.setValue('group: ' + self.name + '/clustering col threshold', self.clusteringColThreshold)
self.settings.setValue('group: ' + self.name + '/clustering row threshold', self.clusteringRowThreshold)
self.settings.setValue('group: ' + self.name + '/dendrogram col height', self.dendrogramHeight)
self.settings.setValue('group: ' + self.name + '/dendrogram row width', self.dendrogramWidth)
self.plot(profile, statsResults)
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
testWindow = TestWindow(HeatmapPlot)
testWindow.show()
sys.exit(app.exec_()) | gpl-3.0 |
cmbclh/vnpy1.7 | vnpy/DAO/testnolog.py | 2 | 3367 | ##-*-coding: utf-8;-*-##
import sys
sys.path.append('../')
#sys.path.append('D:\\tr\\vnpy-master\\vn.trader\\DAO')
sys.path.append('D:/tr/vnpy-1.7/vnpy/common')
import pandas as pd
from sqlalchemy import Column
from sqlalchemy import DECIMAL
from sqlalchemy import Integer
from sqlalchemy import String
from __init__ import *
import common
if __name__=='__main__':
    # Create the test table in the database
columns=[Column('date',String(8),primary_key=True),Column('code',String(8),nullable=False,primary_key=True),Column('name',String(50)),Column('close',DECIMAL(12,4)),Column('open',DECIMAL(12,4))]
common.logger.info(u"执行数据库表%s的创建,列信息:%s" % ("test_table", str(columns)))
try:
if not isTableExist('vnpy', 'test_table') :
createTable('vnpy', 'test_table', columns)
print('DONE')
except Exception as e:
common.logger.error(u"创建指标数据库中的表%s发生了错误,错误信息:%s" % ("test_table", str(e.message)))
print("test")
print(u"test:%s %s" % ("test_table", str(e.message)))
print("开始写入")
#dataframe操作样例
#data = [['20150101','au1801','黄金','1','2'],['20150101','au1802','黄金2','1','2'],['20150101','au1803','黄金3','1','2']]
#data.rename(columns={'tdate':'date','symbol':'code','sname':'name','tclose':'close','topen':'open'},inplace=True)
    # DataFrame manipulation example
data = ['tdate', 'symbol', 'sname', 'tclose', 'topen']
#data.rename(columns={'tdate': 'date', 'symbol': 'code', 'sname': 'name', 'tclose': 'close', 'topen': 'open'}, inplace=True)
datas = [['20150101','au1801','黄金','1','2'],['20150101','au1802','黄金2','1','2'],['20150101','au1803','黄金3','1','2']]
datas1 = (['20150101','au1801','黄金','1','2'],['20150101','au1802','黄金2','1','2'],['20150101','au1803','黄金3','1','2'])
#df_datas1 = pd.DataFrame(datas1)
#df = DataFrame(np.random.randn(4, 5), columns=['A', 'B', 'C', 'D', 'E'])
order = ['20150107','au1804','黄金','1','4']
d = pd.DataFrame([ ['20150101','au1804','黄金','1','4'], ['20150101','au1805','黄金2','1','5'], ['20150101' , 'au1806', '黄金3', '1', '6']], columns = ['date', 'code', 'name', 'close', 'open'])
d = pd.DataFrame([order], columns=['date', 'code', 'name', 'close', 'open'])
print("开始写入中")
try:
writeData('vnpy', 'test_table', d)
#common.logger.info(u"写入数据%s" % (d.max))
print("写入结束了")
except Exception as e:
common.logger.error(u"增量写入数据时发生了错误,错误信息:%s" % str(e.message))
print("写入报错")
#df_datas = pd.DataFrame(datas,data)
#X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
# X_df = pd.DataFrame(X)
#df = pd.DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c']))
#df.set_index(['a', 'b', 'c'], inplace=True)
# for d in df_datas1:
# print("开始写入中")
# try:
# writeData('vnpy','testtable',d)
# common.logger.info(u"写入数据%s" % (d.__str__()))
# print(u"写入数据%d" % d.count(d))
# print("开始写入了")
# except Exception as e:
# common.logger.error(u"增量写入数据时发生了错误,错误信息:%s" % str(e.message))
# print("写入报错")
# | mit |
piyush0609/scipy | scipy/stats/_binned_statistic.py | 26 | 17723 | from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy._lib.six import callable
from collections import namedtuple
__all__ = ['binned_statistic',
'binned_statistic_2d',
'binned_statistic_dd']
def binned_statistic(x, values, statistic='mean',
bins=10, range=None):
"""
Compute a binned statistic for a set of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
x : array_like
A sequence of values to be binned.
values : array_like
The values on which the statistic will be computed. This must be
the same shape as `x`.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in the
given range (10 by default). If `bins` is a sequence, it defines the
bin edges, including the rightmost edge, allowing for non-uniform bin
widths. Values in `x` that are smaller than lowest bin edge are
assigned to bin number 0, values beyond the highest bin are assigned to
``bins[-1]``.
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
Returns
-------
statistic : array
The values of the selected statistic in each bin.
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as values.
See Also
--------
numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
First a basic example:
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
... bins=3)
(array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]), array([1, 2, 1, 2, 3]))
As a second example, we now generate some random data of sailing boat speed
as a function of wind speed, and then determine how fast our boat is for
certain wind speeds:
>>> windspeed = 8 * np.random.rand(500)
>>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
>>> plt.figure()
>>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
... label='binned statistic of data')
>>> plt.legend()
Now we can use ``binnumber`` to select all datapoints with a windspeed
below 1:
>>> low_boatspeed = boatspeed[binnumber == 0]
As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
plot of a distribution that shows the mean and distribution around that
mean per bin, on top of a regular histogram and the probability
distribution function:
>>> x = np.linspace(0, 5, num=500)
>>> x_pdf = stats.maxwell.pdf(x)
>>> samples = stats.maxwell.rvs(size=10000)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
... statistic='mean', bins=25)
>>> bin_width = (bin_edges[1] - bin_edges[0])
>>> bin_centers = bin_edges[1:] - bin_width/2
>>> plt.figure()
>>> plt.hist(samples, bins=50, normed=True, histtype='stepfilled', alpha=0.2,
... label='histogram of data')
>>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
... label='binned statistic of data')
>>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
>>> plt.legend(fontsize=10)
>>> plt.show()
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
medians, edges, xy = binned_statistic_dd([x], values, statistic,
bins, range)
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
('statistic', 'bin_edges', 'binnumber'))
return BinnedStatisticResult(medians, edges[0], xy)
def binned_statistic_2d(x, y, values, statistic='mean',
bins=10, range=None):
"""
Compute a bidimensional binned statistic for a set of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (M,) array_like
A sequence of values to be binned along the second dimension.
values : (N,) array_like
The values on which the statistic will be computed. This must be
the same shape as `x`.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx=ny=bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edges = y_edges = bins),
* the bin edges in each dimension (x_edges, y_edges = bins).
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
Returns
-------
statistic : (nx, ny) ndarray
The values of the selected statistic in each two-dimensional bin
x_edges : (nx + 1) ndarray
The bin edges along the first dimension.
y_edges : (ny + 1) ndarray
The bin edges along the second dimension.
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as `values`.
See Also
--------
numpy.histogram2d, binned_statistic, binned_statistic_dd
Notes
-----
.. versionadded:: 0.11.0
"""
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
medians, edges, xy = binned_statistic_dd([x, y], values, statistic,
bins, range)
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
('statistic', 'x_edge', 'y_edge',
'binnumber'))
return BinnedStatistic2dResult(medians, edges[0], edges[1], xy)
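# A small worked example for binned_statistic_2d, kept as a comment so that
# importing the module is unaffected ('count' ignores `values`, hence None):
#   from scipy import stats
#   x = [0.1, 0.1, 0.1, 0.6]
#   y = [2.1, 2.6, 2.1, 2.1]
#   counts, xedges, yedges, binnumber = stats.binned_statistic_2d(
#       x, y, None, statistic='count', bins=[[0.0, 0.5, 1.0], [2.0, 2.5, 3.0]])
#   counts  ->  array([[ 2.,  1.],
#                      [ 1.,  0.]])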
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
values : array_like
The values on which the statistic will be computed. This must be
the same shape as x.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
The values of the selected statistic in each two-dimensional bin
bin_edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as values.
See Also
--------
np.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
.. versionadded:: 0.11.0
"""
known_stats = ['mean', 'median', 'count', 'sum', 'std']
if not callable(statistic) and statistic not in known_stats:
raise ValueError('invalid statistic %r' % (statistic,))
# This code is based on np.histogramdd
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
N, D = sample.shape
nbin = np.empty(D, int)
edges = D * [None]
dedges = D * [None]
try:
M = len(bins)
if M != D:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = D * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(0), float))
smax = np.atleast_1d(np.array(sample.max(0), float))
else:
smin = np.zeros(D)
smax = np.zeros(D)
for i in np.arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in np.arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in np.arange(D):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
else:
edges[i] = np.asarray(bins[i], float)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
# Compute the bin number each sample falls into.
Ncount = {}
for i in np.arange(D):
Ncount[i] = np.digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in np.arange(D):
# Rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal)
== np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
ni = nbin.argsort()
xy = np.zeros(N, int)
for i in np.arange(0, D - 1):
xy += Ncount[ni[i]] * nbin[ni[i + 1:]].prod()
xy += Ncount[ni[-1]]
result = np.empty(nbin.prod(), float)
if statistic == 'mean':
result.fill(np.nan)
flatcount = np.bincount(xy, None)
flatsum = np.bincount(xy, values)
a = flatcount.nonzero()
result[a] = flatsum[a] / flatcount[a]
elif statistic == 'std':
result.fill(0)
flatcount = np.bincount(xy, None)
flatsum = np.bincount(xy, values)
flatsum2 = np.bincount(xy, values ** 2)
a = flatcount.nonzero()
result[a] = np.sqrt(flatsum2[a] / flatcount[a]
- (flatsum[a] / flatcount[a]) ** 2)
elif statistic == 'count':
result.fill(0)
flatcount = np.bincount(xy, None)
a = np.arange(len(flatcount))
result[a] = flatcount
elif statistic == 'sum':
result.fill(0)
flatsum = np.bincount(xy, values)
a = np.arange(len(flatsum))
result[a] = flatsum
elif statistic == 'median':
result.fill(np.nan)
for i in np.unique(xy):
result[i] = np.median(values[xy == i])
elif callable(statistic):
with warnings.catch_warnings():
# Numpy generates a warnings for mean/std/... with empty list
warnings.filterwarnings('ignore', category=RuntimeWarning)
old = np.seterr(invalid='ignore')
try:
null = statistic([])
except:
null = np.nan
np.seterr(**old)
result.fill(null)
for i in np.unique(xy):
result[i] = statistic(values[xy == i])
# Shape into a proper matrix
result = result.reshape(np.sort(nbin))
for i in np.arange(nbin.size):
j = ni.argsort()[i]
result = result.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D * [slice(1, -1)]
result = result[core]
if (result.shape != nbin - 2).any():
raise RuntimeError('Internal Shape Error')
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
('statistic', 'bin_edges',
'binnumber'))
return BinnedStatisticddResult(result, edges, xy)
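# Usage sketch for binned_statistic_dd (comment only; shapes and values made up):
#   from scipy import stats
#   import numpy as np
#   xy = np.random.rand(100, 2)                 # 100 points in 2-D
#   vals = np.random.rand(100)
#   stat, edges, binnumber = stats.binned_statistic_dd(xy, vals, statistic='mean',
#                                                      bins=[4, 4])
#   stat.shape  ->  (4, 4), with NaN wherever a bin received no points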
| bsd-3-clause |
vschw/smartmeter_visualization | smvi.py | 1 | 9214 | #! /usr/bin/env python
"""Data download and visualization tool for `smartmeter <https://github.com/vschw/smartmeter>
Dependencies
############
`paramiko_scp <https://github.com/jbardin/scp.py>`
"""
import paramiko
import numpy as np
import matplotlib.pyplot as plt
from pylab import ylabel
import ConfigParser
from scp import SCPClient
import argparse
import datetime
import time
def config_init():
"""Loads configuration data from config.ini
"""
global ip, usr, key_path, data_folder, db_name
config = ConfigParser.ConfigParser()
config.read('config.ini')
ip = config.get('ssh_login', 'ip')
usr = config.get('ssh_login', 'username')
key_path = config.get('ssh_login', 'key_path')
data_folder = config.get('ssh_login', 'data_folder')
db_name = config.get('db_login', 'db_name')
print "config.ini parsed"
def ssh_init():
"""SSH to the server using paramiko.
"""
global ssh
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ip, username=usr, key_filename=key_path)
print 'SSH Connection established'
def ssh_close():
ssh.close()
def csv_create(args, circuit, csvname):
command = 'mongoexport --db '+db_name
command += ' --csv'
command += ' --out '+csvname
command += ' --fields timestamp,value'
command += ' --collection nodes'
command += " -q \'{node:"+str(args['nodes']).replace('[','').replace(']','')+", \
variable:\"circuit_"+str(circuit)+"\", \
timestamp:{$gt: "+str(date_to_timestamp(args['startdate'], args['starttime']))+", \
$lt: "+str(date_to_timestamp(args['enddate'], args['endtime']))+"}}\';"
#print command
ssh.exec_command('cd '+data_folder+';'
+command)
print 'csv created on remote server'
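# The assembled mongoexport command looks roughly like the following (node,
# dates and database name are illustrative; db_name actually comes from config.ini):
#   mongoexport --db smartmeter --csv --out node1-c1-20150101-20150102.csv \
#       --fields timestamp,value --collection nodes \
#       -q '{node:1, variable:"circuit_1", timestamp:{$gt: 1420070400, $lt: 1420156800}}'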
def csv_name(args, circuit):
if circuit == 2:
circ = 'c2'
else:
circ = 'c1'
return 'node'+str(args['nodes']).replace('[','').replace(']','').replace(' ','')+'-' \
+circ+'-'\
+args['startdate'].replace('-','')+'-'\
+args['enddate'].replace('-','')+'.csv'
def csv_download(csvname):
scp = SCPClient(ssh.get_transport())
scp.get(data_folder+'/'+csvname)
scp.close()
print 'csv file downloaded'
def csv_remove(csvname):
ssh.exec_command('cd '+data_folder+';'
+'rm '+csvname)
print 'csv file deleted from remote server'
def date_to_timestamp(date, clock='00:00:00'):
s = str(date)+' '+clock
return str(int(time.mktime(datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S").timetuple())))
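# e.g. date_to_timestamp('2015-01-01', '12:00:00') returns a string such as
# '1420113600'; the exact value depends on the machine's local timezone because
# time.mktime() interprets the parsed struct_time as local time.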
def valid_date(s):
try:
datetime.datetime.strptime(s, "%Y-%m-%d")
return s
except ValueError:
msg = "Not a valid date: '{0}'.".format(s)
raise argparse.ArgumentTypeError(msg)
def valid_time(x):
try:
datetime.datetime.strptime(x, "%H:%M:%S")
return x
except ValueError:
msg = "Not a valid time: '{0}'.".format(x)
raise argparse.ArgumentTypeError(msg)
def valid_node(n):
try:
nodelist = [int(x) for x in n.split(',')]
return nodelist
except TypeError:
msg = "Not a valid node format"
raise argparse.ArgumentTypeError(msg)
def valid_timezone(t):
if int(t) > 12 or int(t) < -12:
msg = "Not a valid valid timezone (-12<=t<=12)"
raise argparse.ArgumentTypeError(msg)
return t
def valid_circuit(c):
if int(c) != 1 and int(c) != 2:
msg = "Not a valid circuit selected (enter 1 or 2)"
raise argparse.ArgumentTypeError(msg)
return c
def figure(args, csvname1, csvname2):
if int(args['circuit']) == 0 or int(args['circuit']) == 1:
data1 = np.genfromtxt(csvname1, delimiter=',', skip_header=1,
skip_footer=0, names=['time', 'power'])
time1 =[datetime.datetime.fromtimestamp(ts) for ts in data1['time']]
if int(args['circuit']) == 0 or int(args['circuit']) == 2:
data2 = np.genfromtxt(csvname2, delimiter=',', skip_header=1,
skip_footer=0, names=['time', 'power'])
time2 =[datetime.datetime.fromtimestamp(ts) for ts in data2['time']]
if int(args['circuit']) == 0:
if len(data1['power']) + 1 == len(data2['power']):
power1 = data1['power']
power2 = data2['power'][:-1]
time = time2[:-1]
timest = data1['time']
elif len(data1['power']) == len(data2['power']) + 1:
power1 = data1['power'][:-1]
power2 = data2['power']
time = time1[:-1]
timest = data2['time']
elif len(data1['power']) + 2 == len(data2['power']):
power1 = data1['power']
power2 = data2['power'][:-2]
time = time2[:-2]
timest = data1['time']
elif len(data1['power']) == len(data2['power']) + 2:
power1 = data1['power'][:-2]
power2 = data2['power']
time = time1[:-2]
timest = data2['time']
else:
power1 = data1['power']
power2 = data2['power']
time = time1
timest = data1['time']
elif int(args['circuit']) == 1:
power1 = data1['power']
power2 = [0] * len(data1['power'])
time = time1
timest = data1['time']
elif int(args['circuit']) == 2:
power1 = [0] * len(data2['power'])
power2 = data2['power']
time = time2
timest = data2['time']
kwh = [0]
kwh[0] = 0
for i, ts in enumerate(time[:-1]):
if i == 0:
kwh.append(0)
else:
kwh.append(kwh[i] + ((power1[i] + power2[i]) / 1000 * (timest[i] - timest[i-1]) / 3600))
fig1 = plt.figure()
plt.xlabel('Time [%H:%M:%S]')
plt.ylabel('Power [W]')
ax1=plt.subplot(111)
ax1.plot(time, power1, label='circuit 1')
ax1.plot(time, power2, label='circuit 2')
ax1.plot(time, power2 + power1, label='total power')
plt.legend(loc='upper left')
if args['energy'] == 'true':
ax2 = fig1.add_subplot(111, sharex=ax1, frameon=False)
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
ylabel("Energy [kWh]")
ax2.plot(time, kwh, 'black',label='total energy')
plt.legend(loc='upper right')
plt.show()
def parse_args():
parser = argparse.ArgumentParser(description='Data download and visualization tool for smartmeter')
parser.add_argument('-n','--nodes', help='single node as int, aggregate of multiple nodes as comma-separated list '
'(e.g.: 1,2,3,4)', required=True, type=valid_node)
parser.add_argument('-t','--timezone', help='timezone as int from -12 to 12 in relation to UTC time (e.g.: -8), '
'default = UTC', required=False, type=valid_timezone, default=0)
parser.add_argument('-s', "--startdate", help="start date - format YYYY-MM-DD ", required=False, type=valid_date,
default=datetime.date.fromordinal(datetime.date.today().toordinal()-2).strftime('%Y-%m-%d'))
parser.add_argument('-e', "--enddate", help="end date - format YYYY-MM-DD ", required=False, type=valid_date,
default =datetime.date.fromordinal(datetime.date.today().toordinal()-1).strftime('%Y-%m-%d'))
parser.add_argument('-x', "--starttime", help="start time - format HH:MM:SS", required=False, type=valid_time,
default='00:00:00')
parser.add_argument('-y', "--endtime", help="end time - format HH:MM:SS", required=False, type=valid_time,
default='00:00:00')
parser.add_argument('-c','--circuit', help='single circuit as int, default = aggregate of both circuits',
required=False, type=valid_circuit, default=0)
parser.add_argument('-a','--energy', help='display energy aggregate in figure (true or false)',
required=False, default='true')
args = vars(parser.parse_args())
return args
if __name__ == "__main__":
args = parse_args()
print args
config_init()
ssh_init()
if int(args['circuit']) == 0:
csv_create(args, 1, csv_name(args, 1))
csv_create(args, 2, csv_name(args, 2))
elif int(args['circuit']) == 1:
csv_create(args, 1, csv_name(args, 1))
elif int(args['circuit']) == 2:
        csv_create(args, 2, csv_name(args, 2))
time.sleep(3)
if int(args['circuit']) == 0:
csv_download(csv_name(args, 1))
csv_download(csv_name(args, 2))
elif int(args['circuit']) == 1:
csv_download(csv_name(args, 1))
elif int(args['circuit']) == 2:
csv_download(csv_name(args, 2))
if int(args['circuit']) == 0:
csv_remove(csv_name(args, 1))
csv_remove(csv_name(args, 2))
elif int(args['circuit']) == 1:
csv_remove(csv_name(args, 1))
elif int(args['circuit']) == 2:
csv_remove(csv_name(args, 2))
ssh_close()
figure(args, csv_name(args, 1),csv_name(args, 2))
| gpl-2.0 |
rlowrance/python_lib | applied_data_science3/dataframe.py | 1 | 6114 | '''Functions that operate on pandas DataFrame instances
create_report_categorical(df, ...)
create_report_numeric(df, ...)
replace(df, old_name, new_name, new_value)
Copyright 2017 Roy E. Lowrance
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import collections
import numpy as np
import pdb
from . import ColumnsTable
from . import Report
def create_report_categorical(df, excluded_columns=[], include_types=[object]):
'return tuple (Report instance, names of columns in the report)'
description = df.describe(
include=include_types,
)
print(description)
r = ReportCategorical()
included_columns = []
for column_name in description.columns:
print(column_name, len(column_name))
if column_name in excluded_columns:
print('create_report_categorical: excluding column:', column_name)
continue
else:
r.append_detail(description[column_name])
included_columns.append(column_name)
return r, included_columns
def create_report_numeric(df, excluded_columns=[], include_types=[np.number, object]):
'return tuple (Report instance, names of columns in the report)'
description = df.describe(
include=include_types,
)
print(description)
r = ReportNumeric()
included_columns = []
for column_name in description.columns:
print(column_name)
if column_name in excluded_columns:
print('create_report_numeric: excluding column:', column_name)
continue
else:
r.append_detail(description[column_name])
included_columns.append(column_name)
return r, included_columns
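# Typical call (sketch; the excluded column name and output path are made up):
#   report, used_columns = create_report_numeric(df, excluded_columns=['id'])
#   report.write('numeric-columns.txt')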
def replace(df, old_name, new_name, new_value):
'return new DataFrame with one column replaced'
df1 = df.copy()
df2 = df1.drop(old_name, 1) # 1 ==> drop column (as opposed to row)
df2.insert(0, new_name, new_value)
return df2
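# Usage sketch for replace() (hypothetical column names):
#   df2 = replace(df, 'price', 'log_price', np.log(df['price']))
# The old column is dropped and the new one is inserted at position 0.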
ColumnSpec = collections.namedtuple(
'ColumnSpec',
'print_width formatter heading1 heading2 legend',
)
def categorical(size, header1, header2, definition):
return ColumnSpec(size, '%%%ds' % size, header1, header2, definition)
def numeric(header1, header2, definition):
return ColumnSpec(12, '%12.2f', header1, header2, definition)
def count(header1, header2, definition):
return ColumnSpec(7, '%7d', header1, header2, definition)
all_column_specs = { # each with a 2-row header
'count': count('count', 'not NA', 'number of non-missing values'),
'datacol': ColumnSpec(30, '%30s', 'data', 'column', 'name of column in input file'),
'mean': numeric(' ', 'mean', 'mean value'),
'max': numeric(' ', 'max', 'maximum value'),
'min': numeric(' ', 'min', 'minimum value'),
'p25': numeric('25th', 'percentile', 'value that is the 25th percentile'),
'p50': numeric('50th', 'percentile', 'value that is the 50th percentile'),
'p75': numeric('75th', 'percentile', 'value that is the 75th percentile'),
'std': numeric('standard', 'deviation', 'standard deviation'),
'unique': numeric('num', 'unique', 'number of distinct values'),
'top': categorical(30, 'top', '(most common)', 'most common value'),
'freq': count('top', 'freq', 'frequency of the most common value'),
}
def column_def(column_name):
print(column_name)
assert column_name in all_column_specs, ('%s not defined in all_column_specs' % column_name)
column_spec = all_column_specs[column_name]
return [
column_name,
column_spec.print_width,
column_spec.formatter,
[column_spec.heading1, column_spec.heading2],
column_spec.legend,
]
def column_defs(*column_names):
return [
column_def(column_name)
for column_name in column_names
]
class ReportAnalysis(object):
def __init__(self, verbose=True):
self.report = Report.Report(also_print=verbose)
def write(self, path):
self.ct.append_legend()
for line in self.ct.iterlines():
self.report.append(line)
self.report.write(path)
class ReportCategorical(ReportAnalysis):
def __init__(self, verbose=True):
super(self.__class__, self).__init__() # create self.report
self.ct = ColumnsTable.ColumnsTable(
column_defs('datacol', 'unique', 'top', 'freq',),
)
self.report.append('Statistics on Categorical Columns')
self.report.append(' ')
def append_detail(self, col):
self.ct.append_detail(
datacol='Period' if col.name[:6] == 'Period' else col.name,
unique=col['unique'],
top=col['top'],
freq=col['freq'],
)
# timestamps include first and last, but this code doesn't handle them
assert 'first' not in col.index, col
assert 'last' not in col.index, col
class ReportNumeric(ReportAnalysis):
def __init__(self, verbose=True):
super(self.__class__, self).__init__() # create self.report
self.ct = ColumnsTable.ColumnsTable(
column_defs('datacol', 'count', 'mean', 'std', 'min', 'p25', 'p50', 'p75', 'max',)
)
self.report.append('Statistics on Numeric Columns')
self.report.append(' ')
def append_detail(self, col):
self.ct.append_detail(
datacol='Period' if col.name[:6] == 'Period' else col.name,
count=col['count'],
mean=col['mean'],
std=col['std'],
min=col['min'],
p25=col['25%'],
p50=col['50%'],
p75=col['75%'],
max=col['max'],
)
| apache-2.0 |
olologin/scikit-learn | examples/cluster/plot_cluster_comparison.py | 58 | 4681 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that needs this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
badlands-model/pyBadlands-Companion | badlands_companion/eroFunctions.py | 1 | 7636 | ##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
## ##
## This file forms part of the Badlands surface processes modelling companion. ##
## ##
## For full license and copyright information, please refer to the LICENSE.md file ##
## located at the project root, or contact the authors. ##
## ##
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
"""
Here we define useful functions used to build the dependency of the erodibility coefficient on
precipitation and sediment supply.
"""
import os
import math
import numpy as np
import pandas as pd
class eroFunctions():
"""
    Class for creating simple dependency functions for erodibility coefficients.
"""
def __init__(self, min=0., max=None, sample=None):
self.x = np.linspace(min,max,num=sample,endpoint=True)
return
def sinfct(self):
"""
Sinusoidal function centered along the X-axis.
Returns
-------
y : 1d array
Centered sinusoidal function for x
"""
return np.sin(2.*np.pi*self.x*0.5/self.x.max())
def gaussfct(self, mean, sigma):
"""
Gaussian function.
Parameters
----------
mean : float
Gaussian parameter for center (mean) value.
sigma : float
Gaussian parameter for standard deviation.
Returns
-------
y : 1d array
Gaussian function for x
"""
return np.exp(-((self.x - mean) ** 2.) / float(sigma) ** 2.)
def gauss2fct(self, mean1, sigma1, mean2, sigma2):
"""
Gaussian function of two combined Gaussians.
Parameters
----------
mean1 : float
Gaussian parameter for center (mean) value of left-side Gaussian.
            Note mean1 <= mean2 is required.
sigma1 : float
Standard deviation of left Gaussian.
mean2 : float
Gaussian parameter for center (mean) value of right-side Gaussian.
Note mean2 >= mean1 required.
sigma2 : float
Standard deviation of right Gaussian.
Returns
-------
y : 1d array
Function with left side up to `mean1` defined by the first
Gaussian, and the right side above `mean2` defined by the second.
In the range mean1 <= x <= mean2 the function has value = 1.
"""
assert mean1 <= mean2, 'mean1 <= mean2 is required. See docstring.'
y = np.ones(len(self.x))
idx1 = self.x <= mean1
idx2 = self.x > mean2
y[idx1] = np.exp(-((self.x[idx1] - mean1) ** 2.) / float(sigma1) ** 2.)
y[idx2] = np.exp(-((self.x[idx2] - mean2) ** 2.) / float(sigma2) ** 2.)
return y
def gbellfct(self, a, b, c):
"""
Generalized Bell function generator.
Parameters
----------
a : float
Bell function parameter controlling width.
b : float
Bell function parameter controlling slope.
c : float
Bell function parameter controlling center.
Returns
-------
y : 1d array
Generalized Bell function.
Notes
-----
Definition of Generalized Bell function is:
y(x) = 1 / (1 + abs([x - c] / a) ** [2 * b])
"""
return 1. / (1. + np.abs((self.x - c) / a) ** (2 * b))
def trapfct(self, abcd):
"""
Trapezoidal function generator.
Parameters
----------
abcd : 1d array, length 4
Four-element vector. Ensure a <= b <= c <= d.
Returns
-------
y : 1d array
Trapezoidal function.
"""
assert len(abcd) == 4, 'abcd parameter must have exactly four elements.'
a, b, c, d = np.r_[abcd]
assert a <= b and b <= c and c <= d, 'abcd requires the four elements \
a <= b <= c <= d.'
y = np.ones(len(self.x))
        idx = np.nonzero(self.x <= b)[0]
        if a != b:
            # rising ramp from a to b (value 1 is kept when a == b)
            y[idx] = np.clip((self.x[idx] - a) / float(b - a), 0., 1.)
        idx = np.nonzero(self.x >= c)[0]
        if c != d:
            # falling ramp from c to d (value 1 is kept when c == d)
            y[idx] = np.clip((d - self.x[idx]) / float(d - c), 0., 1.)
idx = np.nonzero(self.x < a)[0]
y[idx] = np.zeros(len(idx))
idx = np.nonzero(self.x > d)[0]
y[idx] = np.zeros(len(idx))
return y
def trifct(self, abc):
"""
Triangular function generator.
Parameters
----------
abc : 1d array, length 3
Three-element vector controlling shape of triangular function.
Requires a <= b <= c.
Returns
-------
y : 1d array
Triangular function.
"""
assert len(abc) == 3, 'abc parameter must have exactly three elements.'
a, b, c = np.r_[abc] # Zero-indexing in Python
assert a <= b and b <= c, 'abc requires the three elements a <= b <= c.'
y = np.zeros(len(self.x))
# Left side
if a != b:
idx = np.nonzero(np.logical_and(a < self.x, self.x < b))[0]
y[idx] = (self.x[idx] - a) / float(b - a)
# Right side
if b != c:
idx = np.nonzero(np.logical_and(b < self.x, self.x < c))[0]
y[idx] = (c - self.x[idx]) / float(c - b)
idx = np.nonzero(self.x == b)
y[idx] = 1
return y
def sigfct(self, b, c):
"""
The basic sigmoid function generator.
Parameters
----------
b : float
Offset or bias. This is the center value of the sigmoid, where it
equals 1/2.
c : float
Controls 'width' of the sigmoidal region about `b` (magnitude); also
            which side of the function is open (sign). A positive value of `c`
means the left side approaches 0.0 while the right side approaches 1.;
a negative value of `c` means the opposite.
Returns
-------
y : 1d array
Generated sigmoid values, defined as y = 1 / (1. + exp[- c * (x - b)])
"""
return 1. / (1. + np.exp(- c * (self.x - b)))
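# Worked check (illustrative comment, not part of the original code): at
# x = b the exponent is 0, so y = 0.5. With c > 0, y -> 0 for x << b and
# y -> 1 for x >> b; a negative c mirrors the curve about x = b.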
def linfct(self, a, b):
"""
The basic linear function generator.
Parameters
----------
a : float
Maximum value of the function, reached at x = x.max().
b : float
Offset (intercept): the value of the function at x = 0.
Returns
-------
y : 1d array
Generated linear function y = s * x + b, with slope s = (a - b) / x.max()
"""
s = (a-b)/self.x.max()
return s * (self.x) + b
def exportFunction(self, val=None, nameCSV='sedsupply'):
"""
Write a CSV file following Badlands requirements:
+ a 2-column file containing the X values (1st column) and the Y values (2nd column),
+ the separator is a space.
Parameters
----------
val : 1d array
Function used in either the sediment supply or slope dependency.
nameCSV : string
Name of the saved CSV file.
"""
df = pd.DataFrame({'X':self.x,'Y':val})
df.to_csv(str(nameCSV)+'.csv',columns=['X', 'Y'], sep=' ', index=False ,header=0)
return | gpl-3.0 |
scollis/iris | lib/iris/symbols.py | 2 | 7677 | # (C) British Crown Copyright 2010 - 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Contains symbol definitions for use with :func:`iris.plot.symbols`.
"""
import itertools
import math
from matplotlib.patches import PathPatch
from matplotlib.path import Path
import numpy as np
__all__ = ('CLOUD_COVER',)
# The thickness to use for lines, circles, etc.
_THICKNESS = 0.1
def _make_merged_patch(paths):
# Convert a list of Path instances into a single, black PathPatch.
# Prepare empty vertex/code arrays for the merged path.
# The vertex array is initially flat for convenient initialisation,
# but is then reshaped to (N, 2).
total_len = sum(len(path) for path in paths)
all_vertices = np.empty(total_len * 2)
all_codes = np.empty(total_len, dtype=Path.code_type)
# Copy vertex/code details from the source paths
all_segments = itertools.chain(*(path.iter_segments() for path in paths))
i_vertices = 0
i_codes = 0
for vertices, code in all_segments:
n_vertices = len(vertices)
all_vertices[i_vertices:i_vertices + n_vertices] = vertices
i_vertices += n_vertices
n_codes = n_vertices / 2
if code == Path.STOP:
code = Path.MOVETO
all_codes[i_codes:i_codes + n_codes] = code
i_codes += n_codes
all_vertices.shape = (total_len, 2)
return PathPatch(Path(all_vertices, all_codes), facecolor='black',
edgecolor='none')
def _ring_path():
# Returns a Path for a hollow ring.
# The outer radius is 1, the inner radius is 1 - _THICKNESS.
circle = Path.unit_circle()
inner_radius = 1.0 - _THICKNESS
vertices = np.concatenate([circle.vertices[:-1],
circle.vertices[-2::-1] * inner_radius])
codes = np.concatenate([circle.codes[:-1], circle.codes[:-1]])
return Path(vertices, codes)
def _vertical_bar_path():
# Returns a Path for a vertical rectangle, with width _THICKNESS, that will
# nicely overlap the result of _ring_path().
width = _THICKNESS / 2.0
inner_radius = 1.0 - _THICKNESS
vertices = np.array([
[-width, -inner_radius],
[width, -inner_radius],
[width, inner_radius],
[-width, inner_radius],
[-width, inner_radius]
])
codes = np.array([Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY])
return Path(vertices, codes)
def _slot_path():
# Returns a Path for a filled unit circle with a vertical rectangle
# removed.
circle = Path.unit_circle()
vertical_bar = _vertical_bar_path()
vertices = np.concatenate([circle.vertices[:-1],
vertical_bar.vertices[-2::-1]])
codes = np.concatenate([circle.codes[:-1], vertical_bar.codes[:-1]])
return Path(vertices, codes)
def _left_bar_path():
# Returns a Path for the left-hand side of a horizontal rectangle, with
# height _THICKNESS, that will nicely overlap the result of _ring_path().
inner_radius = 1.0 - _THICKNESS
height = _THICKNESS / 2.0
vertices = np.array([
[-inner_radius, -height],
[0, -height],
[0, height],
[-inner_radius, height],
[-inner_radius, height]
])
codes = np.array([Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY])
return Path(vertices, codes)
def _slash_path():
# Returns a Path for diagonal, bottom-left to top-right rectangle, with
# width _THICKNESS, that will nicely overlap the result of _ring_path().
half_width = _THICKNESS / 2.0
central_radius = 1.0 - half_width
cos45 = math.cos(math.radians(45))
end_point_offset = cos45 * central_radius
half_width_offset = cos45 * half_width
vertices = np.array([
[-end_point_offset - half_width_offset,
-end_point_offset + half_width_offset],
[-end_point_offset + half_width_offset,
-end_point_offset - half_width_offset],
[end_point_offset + half_width_offset,
end_point_offset - half_width_offset],
[end_point_offset - half_width_offset,
end_point_offset + half_width_offset],
[-end_point_offset - half_width_offset,
-end_point_offset + half_width_offset]
])
codes = np.array([Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY])
return Path(vertices, codes)
def _backslash_path():
# Returns a Path for diagonal, top-left to bottom-right rectangle, with
# width _THICKNESS, that will nicely overlap the result of _ring_path().
half_width = _THICKNESS / 2.0
central_radius = 1.0 - half_width
cos45 = math.cos(math.radians(45))
end_point_offset = cos45 * central_radius
half_width_offset = cos45 * half_width
vertices = np.array([
[-end_point_offset - half_width_offset,
end_point_offset - half_width_offset],
[end_point_offset - half_width_offset,
-end_point_offset - half_width_offset],
[end_point_offset + half_width_offset,
-end_point_offset + half_width_offset],
[-end_point_offset + half_width_offset,
end_point_offset + half_width_offset],
[-end_point_offset - half_width_offset,
end_point_offset - half_width_offset]
])
codes = np.array([Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY])
return Path(vertices, codes)
def _wedge_fix(wedge_path):
'''
Fixes the problem with Path.wedge where it doesn't initialise the first,
and last two vertices.
This fix should not have any side-effects once Path.wedge has been fixed,
but will then be redundant and should be removed.
This is fixed in MPL v1.3, raising a RuntimeError. A check is performed to
allow for backward compatibility with MPL v1.2.x.
'''
if wedge_path.vertices.flags.writeable:
wedge_path.vertices[0] = 0
wedge_path.vertices[-2:] = 0
return wedge_path
CLOUD_COVER = {
0: [_ring_path()],
1: [_ring_path(), _vertical_bar_path()],
2: [_ring_path(), _wedge_fix(Path.wedge(0, 90))],
3: [_ring_path(), _wedge_fix(Path.wedge(0, 90)), _vertical_bar_path()],
4: [_ring_path(), Path.unit_circle_righthalf()],
5: [_ring_path(), Path.unit_circle_righthalf(), _left_bar_path()],
6: [_ring_path(), _wedge_fix(Path.wedge(-180, 90))],
7: [_slot_path()],
8: [Path.unit_circle()],
9: [_ring_path(), _slash_path(), _backslash_path()],
}
"""
A dictionary mapping WMO cloud cover codes to their corresponding symbol.
See http://www.wmo.int/pages/prog/www/DPFS/documents/485_Vol_I_en_colour.pdf
Part II, Appendix II.4, Graphical Representation of Data, Analyses
and Forecasts
"""
def _convert_paths_to_patches():
# Convert the symbols defined as lists-of-paths into patches.
for code, symbol in CLOUD_COVER.iteritems():
CLOUD_COVER[code] = _make_merged_patch(symbol)
_convert_paths_to_patches()
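# Illustrative sketch (not part of the original module): the patches are
# defined in unit-circle coordinates, so a symbol can be drawn directly with
# matplotlib by scaling/offsetting it onto a data point. The scale factor
# and position below are arbitrary example values.
# import matplotlib.pyplot as plt
# from matplotlib.transforms import Affine2D
# fig, ax = plt.subplots()
# patch = CLOUD_COVER[3]
# patch.set_transform(Affine2D().scale(0.1).translate(0.5, 0.5) + ax.transData)
# ax.add_patch(patch)
# ax.set_aspect('equal')
# plt.show()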
| gpl-3.0 |
yyjiang/scikit-learn | examples/exercises/plot_cv_diabetes.py | 231 | 2527 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
jwdebelius/break_4w | break4w/tests/test_categorical.py | 1 | 7711 | from unittest import TestCase, main
import pandas as pd
import numpy as np
import numpy.testing as npt
import pandas.util.testing as pdt
from break4w.categorical import Categorical
class CategoricalTest(TestCase):
def setUp(self):
self.map_ = pd.DataFrame([['1', '2', '2', '4'],
['TBD', 'True', 'True', 'False'],
['Striker', 'D-man', 'D-man', 'Goalie']],
index=['years_on_team',
'team_captain', 'position'],
columns=['Bitty', 'Ransom', 'Holster',
'Johnson'],
).T
self.name = 'position'
self.description = 'Where the player can normally be found on the ice'
self.dtype = str
self.order = ["Striker", "D-man", "Goalie"]
self.c = Categorical(
name=self.name,
description=self.description,
dtype=self.dtype,
order=self.order,
)
def test_categorical_init(self):
test = Categorical(self.name,
self.description,
self.dtype,
self.order)
self.assertEqual(self.order, test.order)
self.assertEqual(test.type, 'Categorical')
self.assertEqual(test.frequency_cutoff, None)
self.assertEqual(test.ref_value, self.order[0])
self.assertEqual(test.var_numeric, None)
self.assertEqual(test.var_labels, None)
self.assertEqual(test.ambiguous, None)
def test_categorical_init_error(self):
with self.assertRaises(ValueError):
Categorical(self.name, self.description, order=self.order,
dtype=ValueError)
def test_categorical_numeric_ambig_str(self):
test = Categorical(self.name,
self.description,
int,
self.order,
ambiguous='manager',
var_labels='1=Striker | 2=D-man | 3=Goalie',
ref_value='coach',
)
self.assertEqual(test.var_numeric,
{'Striker': 1, 'D-man': 2, 'Goalie': 3})
self.assertEqual(test.var_labels,
{1: 'Striker', 2: 'D-man', 3: 'Goalie'})
self.assertEqual(test.ambiguous, set(['manager']))
self.assertEqual(test.ref_value, 'coach')
def test_categorical_dict_var_labels(self):
test = Categorical(self.name,
self.description,
int,
self.order,
var_labels={1: 'Striker', 2: 'D-man', 3: 'Goalie'},
)
def test_str_missing_order(self):
c = Categorical(self.name, self.description, dtype=int, order=[1, 2, 3],
var_labels='1=Striker | 2=D-man | 3=Goalie', missing=['manager'],
blanks=['Lax-Bro'])
known= """
------------------------------------------------------------------------------------
position (Categorical int)
Where the player can normally be found on the ice
------------------------------------------------------------------------------------
mapping 1=Striker
2=D-man
3=Goalie
missing manager
blanks Lax-Bro
------------------------------------------------------------------------------------
"""
test = c.__str__()
self.assertEqual(test, known)
def test_str_label_mapping(self):
Categorical(self.name,
self.description,
self.dtype,
self.order)
def test_update_order(self):
# Checks the current order
self.assertEqual(self.c.order, ["Striker", "D-man", "Goalie"])
# Sets up a function to adjust the data
def remap_(x):
if x in {"D-man", "Goalie"}:
return "Defense"
elif x in {"Striker"}:
return "Offense"
else:
return "Not on the team!"
# # updates the data
self.c._update_order(remap_)
# Checks the updated order
self.assertEqual(self.c.order, ["Offense", "Defense"])
def test_validate_dtype_fail(self):
self.c.dtype = bool
with self.assertRaises(TypeError):
self.c.validate(self.map_)
log_entry = self.c.log[0]
self.assertEqual(log_entry['command'], 'validate')
self.assertEqual(log_entry['transform_type'], 'error')
self.assertEqual(log_entry['transformation'],
'the data cannot be cast to bool')
def test_validate_fail_dtype(self):
self.c.dtype = int
with self.assertRaises(TypeError):
self.c.validate(self.map_)
log_entry = self.c.log[0]
self.assertEqual(log_entry['command'], 'validate')
self.assertEqual(log_entry['transform_type'], 'error')
self.assertEqual(log_entry['transformation'],
'the data cannot be cast to int'
)
def test_validate_fail_values(self):
self.c.name = 'years_on_team'
with self.assertRaises(ValueError):
self.c.validate(self.map_)
log_entry = self.c.log[1]
self.assertEqual(log_entry['command'], 'validate')
self.assertEqual(log_entry['transform_type'], 'error')
self.assertEqual(log_entry['transformation'],
'The following are not valid values: 1 | 2 | 4'
)
def test_validate_pass(self):
self.c.validate(self.map_)
log_entry = self.c.log[0]
self.assertEqual(log_entry['command'], 'validate')
self.assertEqual(log_entry['transform_type'], 'pass')
self.assertEqual(log_entry['transformation'],
'the data can be cast to str'
)
log_entry2 = self.c.log[1]
self.assertEqual(log_entry2['command'], 'validate')
self.assertEqual(log_entry2['transform_type'], 'pass')
self.assertEqual(log_entry2['transformation'],
'all values were valid'
)
def test_to_series(self):
known = pd.Series({'name': self.name,
'description': self.description,
'dtype': 'str',
'type': 'Categorical',
'clean_name': 'Position',
'order': 'Striker | D-man | Goalie',
'ref_value': 'Striker',
})
test_ = self.c._to_series()
pdt.assert_series_equal(known, test_)
def test_read_series(self):
var_ = pd.Series({'name': self.name,
'description': self.description,
'dtype': 'str',
'order': 'Striker | D-man | Goalie',
})
c = Categorical._read_series(var_)
self.assertTrue(isinstance(c, Categorical))
self.assertEqual(c.order, self.order)
self.assertEqual(c.name, self.name)
self.assertEqual(c.description, self.description)
self.assertEqual(c.dtype, str)
def test_round_trip(self):
var_ = self.c._to_series()
new_ = Categorical._read_series(var_)
self.assertEqual(self.c.__dict__, new_.__dict__)
if __name__ == '__main__':
main()
| bsd-2-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/recipes/fill_between_alpha.py | 1 | 5971 | """
Fill Between and Alpha
======================
The :meth:`~matplotlib.axes.Axes.fill_between` function generates a
shaded region between a min and max boundary that is useful for
illustrating ranges. It has a very handy ``where`` argument to
combine filling with logical ranges, e.g., to just fill in a curve over
some threshold value.
At its most basic level, ``fill_between`` can be used to enhance a
graph's visual appearance. Let's compare two graphs of a financial
time series, with a simple line plot on the left and a filled line on the
right.
"""
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.cbook as cbook
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
# load up some sample financial data
with cbook.get_sample_data('goog.npz') as datafile:
r = np.load(datafile)['price_data'].view(np.recarray)
# Matplotlib prefers datetime instead of np.datetime64.
date = r.date.astype('O')
# create two subplots with the shared x and y axes
fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True, sharey=True)
pricemin = r.close.min()
ax1.plot(date, r.close, lw=2)
ax2.fill_between(date, pricemin, r.close, facecolor='blue', alpha=0.5)
for ax in ax1, ax2:
ax.grid(True)
ax1.set_ylabel('price')
for label in ax2.get_yticklabels():
label.set_visible(False)
fig.suptitle('Google (GOOG) daily closing price')
fig.autofmt_xdate()
###############################################################################
# The alpha channel is not necessary here, but it can be used to soften
# colors for more visually appealing plots. In other examples, as we'll
# see below, the alpha channel is functionally useful as the shaded
# regions can overlap and alpha allows you to see both. Note that the
# postscript format does not support alpha (this is a postscript
# limitation, not a matplotlib limitation), so when using alpha save
# your figures in PNG, PDF or SVG.
#
# Our next example computes two populations of random walkers with a
# different mean and standard deviation of the normal distributions from
# which the steps are drawn. We use shaded regions to plot +/- one
# standard deviation of the mean position of the population. Here the
# alpha channel is useful, not just aesthetic.
Nsteps, Nwalkers = 100, 250
t = np.arange(Nsteps)
# an (Nsteps x Nwalkers) array of random walk steps
S1 = 0.002 + 0.01*np.random.randn(Nsteps, Nwalkers)
S2 = 0.004 + 0.02*np.random.randn(Nsteps, Nwalkers)
# an (Nsteps x Nwalkers) array of random walker positions
X1 = S1.cumsum(axis=0)
X2 = S2.cumsum(axis=0)
# Nsteps length arrays empirical means and standard deviations of both
# populations over time
mu1 = X1.mean(axis=1)
sigma1 = X1.std(axis=1)
mu2 = X2.mean(axis=1)
sigma2 = X2.std(axis=1)
# plot it!
fig, ax = plt.subplots(1)
ax.plot(t, mu1, lw=2, label='mean population 1', color='blue')
ax.plot(t, mu2, lw=2, label='mean population 2', color='yellow')
ax.fill_between(t, mu1+sigma1, mu1-sigma1, facecolor='blue', alpha=0.5)
ax.fill_between(t, mu2+sigma2, mu2-sigma2, facecolor='yellow', alpha=0.5)
ax.set_title('random walkers empirical $\mu$ and $\pm \sigma$ interval')
ax.legend(loc='upper left')
ax.set_xlabel('num steps')
ax.set_ylabel('position')
ax.grid()
###############################################################################
# The ``where`` keyword argument is very handy for highlighting certain
# regions of the graph. ``where`` takes a boolean mask the same length
# as the x, ymin and ymax arguments, and only fills in the region where
# the boolean mask is True. In the example below, we simulate a single
# random walker and compute the analytic mean and standard deviation of
# the population positions. The population mean is shown as the black
# dashed line, and the plus/minus one sigma deviation from the mean is
# shown as the yellow filled region. We use the where mask
# ``X > upper_bound`` to find the region where the walker is above the one
# sigma boundary, and shade that region blue.
Nsteps = 500
t = np.arange(Nsteps)
mu = 0.002
sigma = 0.01
# the steps and position
S = mu + sigma*np.random.randn(Nsteps)
X = S.cumsum()
# the 1 sigma upper and lower analytic population bounds
lower_bound = mu*t - sigma*np.sqrt(t)
upper_bound = mu*t + sigma*np.sqrt(t)
fig, ax = plt.subplots(1)
ax.plot(t, X, lw=2, label='walker position', color='blue')
ax.plot(t, mu*t, lw=1, label='population mean', color='black', ls='--')
ax.fill_between(t, lower_bound, upper_bound, facecolor='yellow', alpha=0.5,
label='1 sigma range')
ax.legend(loc='upper left')
# here we use the where argument to only fill the region where the
# walker is above the population 1 sigma boundary
ax.fill_between(t, upper_bound, X, where=X > upper_bound, facecolor='blue',
alpha=0.5)
ax.set_xlabel('num steps')
ax.set_ylabel('position')
ax.grid()
###############################################################################
# Another handy use of filled regions is to highlight horizontal or
# vertical spans of an axes -- for that matplotlib has some helper
# functions :meth:`~matplotlib.axes.Axes.axhspan` and
# :meth:`~matplotlib.axes.Axes.axvspan` and example
# :ref:`sphx_glr_gallery_subplots_axes_and_figures_axhspan_demo.py`.
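# A minimal sketch of those helpers (added for illustration, not in the
# original example); the band limits below are arbitrary values:
# ax.axhspan(-0.05, 0.05, facecolor='0.5', alpha=0.5)   # horizontal band
# ax.axvspan(100, 200, facecolor='g', alpha=0.3)        # vertical band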
pltshow(plt)
| mit |
huobaowangxi/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initializations strategies to make
the algorithm convergence robust as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (with randomly generated datasets) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
ashhher3/scikit-learn | sklearn/decomposition/truncated_svd.py | 38 | 7697 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.27930...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
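# Illustrative sketch (not part of this module): typical LSA usage on a
# tf-idf matrix, assuming scikit-learn's text vectorizers are available.
# The toy documents below are arbitrary example data.
# from sklearn.feature_extraction.text import TfidfVectorizer
# docs = ["the cat sat", "the dog sat", "cats and dogs sat"]
# X_tfidf = TfidfVectorizer().fit_transform(docs)
# lsa = TruncatedSVD(n_components=2).fit(X_tfidf)
# doc_topics = lsa.transform(X_tfidf)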
| bsd-3-clause |
cbertinato/pandas | scripts/download_wheels.py | 10 | 1169 | #!/usr/bin/env python
"""Fetch wheels from wheels.scipy.org for a pandas version."""
import argparse
import pathlib
import sys
import urllib.parse
import urllib.request
from lxml import html
def parse_args(args=None):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("version", type=str, help="Pandas version (0.23.0)")
return parser.parse_args(args)
def fetch(version):
base = 'http://wheels.scipy.org'
tree = html.parse(base)
root = tree.getroot()
dest = pathlib.Path('dist')
dest.mkdir(exist_ok=True)
files = [x for x in root.xpath("//a/text()")
if x.startswith('pandas-{}'.format(version))
and not dest.joinpath(x).exists()]
N = len(files)
for i, filename in enumerate(files, 1):
out = str(dest.joinpath(filename))
link = urllib.request.urljoin(base, filename)
urllib.request.urlretrieve(link, out)
print("Downloaded {link} to {out} [{i}/{N}]".format(
link=link, out=out, i=i, N=N
))
def main(args=None):
args = parse_args(args)
fetch(args.version)
if __name__ == '__main__':
sys.exit(main())
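# Example invocation (illustrative, not part of the original script):
#   python download_wheels.py 0.23.0
# downloads all pandas-0.23.0 wheels from wheels.scipy.org into ./dist/.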
| bsd-3-clause |
mojoboss/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 204 | 4452 | """
============================
Faces dataset decompositions
============================
This example applies different unsupervised matrix decomposition
(dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` to the :ref:`olivetti_faces` dataset
(see the documentation chapter :ref:`decompositions`).
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
tol=5e-3, sparseness='components'),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
FFroehlich/AMICI | python/amici/__init__.py | 1 | 10266 | """
AMICI
-----
The AMICI Python module provides functionality for importing SBML models and
turning them into C++ Python extensions.
Getting started:
```
# creating a extension module for an SBML model:
import amici
amiSbml = amici.SbmlImporter('mymodel.sbml')
amiSbml.sbml2amici('modelName', 'outputDirectory')
# using the created module (set python path)
import modelName
help(modelName)
```
:var amici_path:
absolute root path of the amici repository
:var amiciSwigPath:
absolute path of the amici swig directory
:var amiciSrcPath:
absolute path of the amici source directory
:var amiciModulePath:
absolute root path of the amici module
:var hdf5_enabled:
boolean indicating if amici was compiled with hdf5 support
:var has_clibs:
boolean indicating if this is the full package with swig interface or
the raw package without
:var capture_cstdout:
context to redirect C/C++ stdout to python stdout if python stdout was
redirected (doing nothing if not redirected).
"""
import importlib
import os
import re
import sys
from contextlib import suppress
from types import ModuleType
from typing import Optional, Union, Sequence, List
def _get_amici_path():
"""
Determine package installation path, or, if used directly from git
repository, get repository root
"""
basedir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
if os.path.exists(os.path.join(basedir, '.git')):
return os.path.abspath(basedir)
return os.path.dirname(__file__)
def _get_commit_hash():
"""Get commit hash from file"""
basedir = os.path.dirname(os.path.dirname(os.path.dirname(amici_path)))
commitfile = next(
(
file for file in [
os.path.join(basedir, '.git', 'FETCH_HEAD'),
os.path.join(basedir, '.git', 'ORIG_HEAD'), ]
if os.path.isfile(file)
),
None
)
if commitfile:
with open(commitfile) as f:
return str(re.search(r'^([\w]*)', f.read().strip()).group())
return 'unknown'
def _imported_from_setup() -> bool:
"""Check whether this module is imported from `setup.py`"""
from inspect import getouterframes, currentframe
# in case we are imported from setup.py, this will be the AMICI package
# root directory (otherwise it is most likely the Python library directory,
# we are not interested in)
package_root = os.path.realpath(os.path.dirname(os.path.dirname(__file__)))
for frame in getouterframes(currentframe()):
# Need to compare the full path, in case a user tries to import AMICI
# from a module `*setup.py`. Will still cause trouble if some package
# requires the AMICI extension during its installation, but seems
# unlikely...
frame_path = os.path.realpath(os.path.expanduser(frame.filename))
if frame_path == os.path.join(package_root, 'setup.py'):
return True
return False
# redirect C/C++ stdout to python stdout if python stdout is redirected,
# e.g. in ipython notebook
capture_cstdout = suppress
if sys.stdout != sys.__stdout__:
try:
from wurlitzer import sys_pipes as capture_cstdout
except ModuleNotFoundError:
pass
# Initialize AMICI paths
amici_path = _get_amici_path()
amiciSwigPath = os.path.join(amici_path, 'swig')
amiciSrcPath = os.path.join(amici_path, 'src')
amiciModulePath = os.path.dirname(__file__)
has_clibs = any([os.path.isfile(os.path.join(amici_path, wrapper))
for wrapper in ['amici.py', 'amici_without_hdf5.py']])
AmiciModel = Union['amici.Model', 'amici.ModelPtr']
AmiciSolver = Union['amici.Solver', 'amici.SolverPtr']
AmiciExpData = Union['amici.ExpData', 'amici.ExpDataPtr']
AmiciExpDataVector = Union['amici.ExpDataPtrVector', Sequence[AmiciExpData]]
# Get version number from file
with open(os.path.join(amici_path, 'version.txt')) as f:
__version__ = f.read().strip()
__commit__ = _get_commit_hash()
# Import SWIG module and swig-dependent submodules if required and available
if not _imported_from_setup():
if has_clibs:
from . import amici
from .amici import *
# These module require the swig interface and other dependencies
from .numpy import ReturnDataView, ExpDataView
from .pandas import (
getEdataFromDataFrame,
getDataObservablesAsDataFrame,
getSimulationObservablesAsDataFrame,
getSimulationStatesAsDataFrame,
getResidualsAsDataFrame
)
# These modules don't require the swig interface
from .sbml_import import SbmlImporter, assignmentRules2observables
from .ode_export import ODEModel, ODEExporter
hdf5_enabled = 'readSolverSettingsFromHDF5' in dir()
def runAmiciSimulation(
model: AmiciModel,
solver: AmiciSolver,
edata: Optional[AmiciExpData] = None
) -> 'numpy.ReturnDataView':
"""
Convenience wrapper around amici.runAmiciSimulation (generated by swig)
:param model: Model instance
:param solver: Solver instance, must be generated from Model.getSolver()
:param edata: ExpData instance (optional)
:returns: ReturnData object with simulation results
"""
if edata and isinstance(edata, amici.ExpDataPtr):
edata = edata.get()
with capture_cstdout():
rdata = amici.runAmiciSimulation(solver.get(), edata, model.get())
return numpy.ReturnDataView(rdata)
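# Illustrative sketch (not part of the original module), reusing the names
# 'modelName' and 'outputDirectory' from the module docstring; getModel() is
# assumed to be provided by the generated model package:
# model_module = import_model_module('modelName', 'outputDirectory')
# model = model_module.getModel()
# solver = model.getSolver()
# rdata = runAmiciSimulation(model, solver)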
def ExpData(*args) -> 'amici.ExpData':
"""
Convenience wrapper for ExpData constructors
:param args: arguments
:returns: ExpData Instance
"""
if isinstance(args[0], ReturnDataView):
return amici.ExpData(args[0]['ptr'].get(), *args[1:])
elif isinstance(args[0], amici.ExpDataPtr):
# the *args[:1] should be empty, but by the time you read this,
# the constructor signature may have changed and you are glad this
# wrapper did not break.
return amici.ExpData(args[0].get(), *args[1:])
elif isinstance(args[0], amici.ModelPtr):
return amici.ExpData(args[0].get())
else:
return amici.ExpData(*args)
def runAmiciSimulations(
model: AmiciModel,
solver: AmiciSolver,
edata_list: AmiciExpDataVector,
failfast: bool = True,
num_threads: int = 1,
) -> List['numpy.ReturnDataView']:
"""
Convenience wrapper for loops of amici.runAmiciSimulation
:param model: Model instance
:param solver: Solver instance, must be generated from Model.getSolver()
:param edata_list: list of ExpData instances
:param failfast: returns as soon as an integration failure is encountered
:param num_threads: number of threads to use (only used if compiled
with openmp)
:returns: list of simulation results
"""
with capture_cstdout():
edata_ptr_vector = amici.ExpDataPtrVector(edata_list)
rdata_ptr_list = amici.runAmiciSimulations(solver.get(),
edata_ptr_vector,
model.get(),
failfast,
num_threads)
return [numpy.ReturnDataView(r) for r in rdata_ptr_list]
def readSolverSettingsFromHDF5(
file: str,
solver: AmiciSolver,
location: Optional[str] = 'solverSettings'
) -> None:
"""
Convenience wrapper for :func:`amici.readSolverSettingsFromHDF5`
:param file: hdf5 filename
:param solver: Solver instance to which settings will be transferred
:param location: location of solver settings in hdf5 file
"""
if isinstance(solver, amici.SolverPtr):
amici.readSolverSettingsFromHDF5(file, solver.get(), location)
else:
amici.readSolverSettingsFromHDF5(file, solver, location)
def writeSolverSettingsToHDF5(
solver: AmiciSolver,
file: Union[str, object],
location: Optional[str] = 'solverSettings'
) -> None:
"""
Convenience wrapper for :func:`amici.writeSolverSettingsToHDF5`
:param file: hdf5 filename, can also be an object created by
:func:`amici.createOrOpenForWriting`
:param solver: Solver instance from which settings will be stored
:param location: location of solver settings in hdf5 file
"""
if isinstance(solver, amici.SolverPtr):
amici.writeSolverSettingsToHDF5(solver.get(), file, location)
else:
amici.writeSolverSettingsToHDF5(solver, file, location)
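# Illustrative sketch (not part of the original module): a settings
# round-trip with the two wrappers above, assuming AMICI was built with HDF5
# support (hdf5_enabled is True) and `solver` is an existing Solver instance.
# writeSolverSettingsToHDF5(solver, 'solver_settings.h5')
# readSolverSettingsFromHDF5('solver_settings.h5', solver)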
class add_path:
"""Context manager for temporarily changing PYTHONPATH"""
def __init__(self, path: str):
self.path: str = path
def __enter__(self):
if self.path:
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
sys.path.remove(self.path)
except ValueError:
pass
def import_model_module(module_name: str,
module_path: Optional[str] = None) -> ModuleType:
"""
Import Python module of an AMICI model
:param module_name: Name of the python package of the model
:param module_path: Absolute or relative path of the package directory
:return: The model module
"""
# ensure we will find the newly created module
importlib.invalidate_caches()
if not os.path.isdir(module_path):
raise ValueError(f"module_path '{module_path}' is not a directory.")
module_path = os.path.abspath(module_path)
# module already loaded?
if module_name in sys.modules:
# if a module with that name is already in sys.modules, we remove it,
# along with all other modules from that package. otherwise, there
# will be trouble if two different models with the same name are to
# be imported.
del sys.modules[module_name]
# collect first, don't delete while iterating
to_unload = {loaded_module_name for loaded_module_name in
sys.modules.keys() if
loaded_module_name.startswith(f"{module_name}.")}
for m in to_unload:
del sys.modules[m]
with add_path(module_path):
return importlib.import_module(module_name)
| bsd-2-clause |
xuewei4d/scikit-learn | examples/mixture/plot_concentration_prior.py | 31 | 5695 | """
========================================================================
Concentration Prior Type Analysis of Variation Bayesian Gaussian Mixture
========================================================================
This example plots the ellipsoids obtained from a toy dataset (mixture of three
Gaussians) fitted by the ``BayesianGaussianMixture`` class models with a
Dirichlet distribution prior
(``weight_concentration_prior_type='dirichlet_distribution'``) and a Dirichlet
process prior (``weight_concentration_prior_type='dirichlet_process'``). On
each figure, we plot the results for three different values of the weight
concentration prior.
The ``BayesianGaussianMixture`` class can adapt its number of mixture
components automatically. The parameter ``weight_concentration_prior`` has a
direct link with the resulting number of components with non-zero weights.
Specifying a low value for the concentration prior will make the model put most
of the weight on a few components and set the remaining components' weights very close
to zero. High values of the concentration prior will allow a larger number of
components to be active in the mixture.
The Dirichlet process prior allows to define an infinite number of components
and automatically selects the correct number of components: it activates a
component only if it is necessary.
On the contrary the classical finite mixture model with a Dirichlet
distribution prior will favor more uniformly weighted components and therefore
tends to divide natural clusters into unnecessary sub-components.
"""
# Author: Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from sklearn.mixture import BayesianGaussianMixture
print(__doc__)
def plot_ellipses(ax, weights, means, covars):
for n in range(means.shape[0]):
eig_vals, eig_vecs = np.linalg.eigh(covars[n])
unit_eig_vec = eig_vecs[0] / np.linalg.norm(eig_vecs[0])
angle = np.arctan2(unit_eig_vec[1], unit_eig_vec[0])
# Ellipse needs degrees
angle = 180 * angle / np.pi
# convert eigenvalues (variances) into ellipse axis lengths
eig_vals = 2 * np.sqrt(2) * np.sqrt(eig_vals)
ell = mpl.patches.Ellipse(means[n], eig_vals[0], eig_vals[1],
180 + angle, edgecolor='black')
ell.set_clip_box(ax.bbox)
ell.set_alpha(weights[n])
ell.set_facecolor('#56B4E9')
ax.add_artist(ell)
def plot_results(ax1, ax2, estimator, X, y, title, plot_title=False):
ax1.set_title(title)
ax1.scatter(X[:, 0], X[:, 1], s=5, marker='o', color=colors[y], alpha=0.8)
ax1.set_xlim(-2., 2.)
ax1.set_ylim(-3., 3.)
ax1.set_xticks(())
ax1.set_yticks(())
plot_ellipses(ax1, estimator.weights_, estimator.means_,
estimator.covariances_)
ax2.get_xaxis().set_tick_params(direction='out')
ax2.yaxis.grid(True, alpha=0.7)
for k, w in enumerate(estimator.weights_):
ax2.bar(k, w, width=0.9, color='#56B4E9', zorder=3,
align='center', edgecolor='black')
ax2.text(k, w + 0.007, "%.1f%%" % (w * 100.),
horizontalalignment='center')
ax2.set_xlim(-.6, 2 * n_components - .4)
ax2.set_ylim(0., 1.1)
ax2.tick_params(axis='y', which='both', left=False,
right=False, labelleft=False)
ax2.tick_params(axis='x', which='both', top=False)
if plot_title:
ax1.set_ylabel('Estimated Mixtures')
ax2.set_ylabel('Weight of each component')
# Parameters of the dataset
random_state, n_components, n_features = 2, 3, 2
colors = np.array(['#0072B2', '#F0E442', '#D55E00'])
covars = np.array([[[.7, .0], [.0, .1]],
[[.5, .0], [.0, .1]],
[[.5, .0], [.0, .1]]])
samples = np.array([200, 500, 200])
means = np.array([[.0, -.70],
[.0, .0],
[.0, .70]])
# mean_precision_prior= 0.8 to minimize the influence of the prior
estimators = [
("Finite mixture with a Dirichlet distribution\nprior and "
r"$\gamma_0=$", BayesianGaussianMixture(
weight_concentration_prior_type="dirichlet_distribution",
n_components=2 * n_components, reg_covar=0, init_params='random',
max_iter=1500, mean_precision_prior=.8,
random_state=random_state), [0.001, 1, 1000]),
("Infinite mixture with a Dirichlet process\n prior and" r"$\gamma_0=$",
BayesianGaussianMixture(
weight_concentration_prior_type="dirichlet_process",
n_components=2 * n_components, reg_covar=0, init_params='random',
max_iter=1500, mean_precision_prior=.8,
random_state=random_state), [1, 1000, 100000])]
# Generate data
rng = np.random.RandomState(random_state)
X = np.vstack([
rng.multivariate_normal(means[j], covars[j], samples[j])
for j in range(n_components)])
y = np.concatenate([np.full(samples[j], j, dtype=int)
for j in range(n_components)])
# Plot results in two different figures
for (title, estimator, concentrations_prior) in estimators:
plt.figure(figsize=(4.7 * 3, 8))
plt.subplots_adjust(bottom=.04, top=0.90, hspace=.05, wspace=.05,
left=.03, right=.99)
gs = gridspec.GridSpec(3, len(concentrations_prior))
for k, concentration in enumerate(concentrations_prior):
estimator.weight_concentration_prior = concentration
estimator.fit(X)
plot_results(plt.subplot(gs[0:2, k]), plt.subplot(gs[2, k]), estimator,
X, y, r"%s$%.1e$" % (title, concentration),
plot_title=k == 0)
plt.show()
| bsd-3-clause |
tody411/InverseToon | inversetoon/geometry/bounding_box.py | 1 | 2148 | # -*- coding: utf-8 -*-
## @package inversetoon.geometry.bounding_box
#
# Implementation of a 2D bounding box.
# @author tody
# @date 2015/08/12
import numpy as np
## Implementation of a 2D bounding box.
class BoundingBox:
## Constructor
#
# @param points (n x 2) array.
def __init__(self, points=[]):
if len(points) > 0:
self.create(points)
## Creates the bounding box which contains the given points.
#
# @param points (n x 2) array.
def create(self, points):
points = np.array(points)
xs, ys = points[:, 0], points[:, 1]
self._x_min = np.min(xs)
self._x_max = np.max(xs)
self._y_min = np.min(ys)
self._y_max = np.max(ys)
## Returns the minimum point for the bounding box.
def min(self):
return np.array([self._x_min, self._y_min])
## Returns the maximum point for the bounding box.
def max(self):
return np.array([self._x_max, self._y_max])
## Returns the center of the bounding box.
def center(self):
return 0.5 * (self.min() + self.max())
## Returns true if the bounding box intersects another given bounding box.
def intersects(self, bb):
return self._x_max > bb._x_min and self._x_min < bb._x_max and self._y_max > bb._y_min and self._y_min < bb._y_max
## Returns true if the bounding box contains the given point.
def contains(self, p):
x, y = p
return self._x_min < x and self._x_max > x and self._y_min < y and self._y_max > y
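# Illustrative example (comment only, not part of the original class):
# bb1 = BoundingBox([(0.0, 0.0), (1.0, 1.0)])
# bb2 = BoundingBox([(0.5, 0.5), (2.0, 2.0)])
# bb1.intersects(bb2)        -> True
# bb1.contains((0.25, 0.75)) -> True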
## Plot the bounding box with matplotlib.
def plotBoundingBox(self, plt, color="b", alpha=0.05, **kargs):
x0, y0 = self.min()
x1, y1 = self.max()
points = np.array([(x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)])
plt.fill(points[:, 0], points[:, 1], color=color, alpha=alpha, **kargs)
if __name__ == '__main__':
import matplotlib.pyplot as plt
from inversetoon.plot.window import showMaximize
points = np.random.rand(100, 2)
plt.scatter(points[:, 0], points[:, 1])
bb = BoundingBox(points)
bb.plotBoundingBox(plt)
showMaximize() | mit |
wschenck/nest-simulator | pynest/examples/spatial/connex_ew.py | 14 | 2269 | # -*- coding: utf-8 -*-
#
# connex_ew.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
NEST spatial example
--------------------
Create two populations of iaf_psc_alpha neurons on a 30x30 grid with edge_wrap,
connect with circular mask, flat probability,
visualize.
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""
import matplotlib.pyplot as plt
import numpy as np
import nest
nest.ResetKernel()
pos = nest.spatial.grid(shape=[30, 30], extent=[3., 3.], edge_wrap=True)
#######################################################################
# create and connect two populations
a = nest.Create('iaf_psc_alpha', positions=pos)
b = nest.Create('iaf_psc_alpha', positions=pos)
cdict = {'rule': 'pairwise_bernoulli',
'p': 0.5,
'mask': {'circular': {'radius': 0.5}}}
nest.Connect(a, b,
conn_spec=cdict,
syn_spec={'weight': nest.random.uniform(0.5, 2.)})
plt.clf()
#####################################################################
# plot targets of neurons in different grid locations
# first, clear existing figure, get current figure
plt.clf()
fig = plt.gcf()
# plot targets of two source neurons into same figure, with mask
for src_index in [30 * 15 + 15, 0]:
# obtain node id for center
src = a[src_index:src_index + 1]
nest.PlotTargets(src, b, mask=cdict['mask'], fig=fig)
# beautify
plt.axes().set_xticks(np.arange(-1.5, 1.55, 0.5))
plt.axes().set_yticks(np.arange(-1.5, 1.55, 0.5))
plt.grid(True)
plt.axis([-2.0, 2.0, -2.0, 2.0])
plt.axes().set_aspect('equal', 'box')
plt.title('Connection targets')
plt.show()
# plt.savefig('connex_ew.pdf')
| gpl-2.0 |
abimannans/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 294 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
| bsd-3-clause |
cpbl/pystata | latexRegressions.py | 1 | 319440 | #from pystata import *
import re, os
from pystata import defaults # Do not import from pystata_config; that should only be done in one place, and globally once.
paths = defaults['paths']
WP = paths['working']
from pylab import array, flatten, arange
import pandas as pd
import pylab as plt
from copy import deepcopy
from pystata_core import * # Should I be removing this line for style reasons??
from .pystata_core import standardSubstitutions, substitutedNames, readStataRegressionLogFile, texheader, defaultVariableOrder # Or import it as stata??
from .pystata_core import *
from codecs import open  # I need to do this just to get the encoding= option in open().
if 'stata' not in defaults[
'paths']: # This is actually for sprawl's analysis.py, feb2014
defaults['paths']['stata'] = defaults['paths']
from cpblUtilities import debugprint, uniqueInOrder, doSystemLatex
from cpblUtilities.textables import chooseSFormat, tableToTSV, cpblTable_to_PDF
from cpblUtilities import transLegend, dfPlotWithEnvelope, str2pathname, flattenList, dgetget, NaN, dsetset
from cpblUtilities.cpblunicode import str2latex
from cpblUtilities.color import getIndexedColormap
from pystata.codebooks import stataCodebookClass
"""
To do:
revise generate_postEstimate_sums_by_condition(rollingX,ifs) to give DataFrame return value
revise semiRolling to build dataframes only.
"""
###########################################################################################
###
class latexRegressionFile(): # # # # # # MAJOR CLASS # # # # # #
###
#######################################################################################
""" Open a file and write some headers. The file will act as a .tex file with a regression table in it.
As a class, it's a wrapper for some of my python-stata-latex programs that convert Stata output to latex.
    It also compiles the file when it is closed.
    April 2008: the "suppressSE" argument throughout this class became obsolete: LaTeX now uses a facultative \useSEs{}{} to choose between showing standard errors or not.
Dec 2009:
    Allow specification of a "main survey" and a "main data file". This makes it easier to do a half-decent job of guessing how to construct the right descriptive statistics tables. That is, if we know a survey to use, we can look up the PDF codebook to find descriptions/questions. If we know the right data file, we can calculate stats using the right/final/regression-ready data. Because these ideas do not always make sense (multiple surveys or files), one can also call descriptive stats tables along the way with these things specified.
"""
# Caution: If you define variables here, before __init__, you can access them with latexinstance.avar , but those are common to the entire class, not to an instance.
def __init__(self,
filename,
margins='default',
allow_underscore=True,
colour=True,
modelVersion=None,
regressionVersion=None,
compactPreview=True,
codebook=None,
mainSurvey=None,
mainDataFile=None,
recreateCodebook=None,
substitutions=None,
texNameSuffix=None):
"""
codebook can be either a string (name of DTA file) or a codebook object. It's appropriate when there is one dataset for the whole file.. Nov 2009. I suppose I could add this functionality of making descriptive stats tables somehow at the table-level....
"""
self.lfile = None
self.lfileTeXbody = ''
self.fname = None
self.fpathname = None
self.modelVersion = None
self.regressionVersion = None
self.compactPreview = True
self.codebook = None # Not implemented yet? Added only Dec 2009.. does this conflict with some other functionality?
self.mainSurvey = None
self.mainDataFile = None
self.recreateCodebook = None #This tells the codebook class whether to recreate the codebook of mainDataFile...
self.captureNoObservations = None # Dec 2010: update. This should really be default true, as there's no harm now. I use Stata's "capture noisily" to test whether we need to introduce a dummy regression when a regression fails with no samples.
self.skipStataForCompletedTables = False # This sets all regTable calls not to call Stata for any tables which have been completed, even if they are old. That is, either DO NOT TURN THIS ON or actually delete the Stata .log files in the texPath that you wish to update/overwrite. So this is also a debugging tool, basically, to focus on tables that are causing trouble when you may be doing many tables all in one program. The trick, then, if you're using large data files, is to make sure that the data files are loaded as part of the first model's "code" element, in order that it doesn't get called if the table is to be skipped... So there's a tradeoff if you're making many tables from the same data.
self.skipSavingExistingFigures = None # If not set, this will follow skipStataForCompletedTables. Otherwise, it can separately choose whether to update figures or not (saving is often the slowest part, and is often done through a call to this class).
self.usingDisclosedData = False # This means never run stata, since only log files are available, not data. (ie we're analysiing RDC disclosures). So this can just be used as a double check to enforce no stata. Not fully implemented yet. (2010 July)
self.skipAllDerivedTables = False # ie if True, ignore all table calls (or just output) whenevr "skipStata=True" flag is used. So this is for making a more compact PDF for exporting results from RDC. Conversely, when set to True it also ignores any skipLaTeX=True calls, since those might be the original/main regression call. Also, it does not produce CRC tables unless forced to with "onlyCRC" or etc.
self.variableOrder = None # This is a default variable order to override what's in pystata module. This simplifies things so one doesn't need to specify it for every single table, though it can also be done per table.
self.txtNamesUsed = []
self.variablesUsed = '' # Collect all variables from regressions, useful for choosing what to display as desc stats at end.
if not modelVersion:
modelVersion = ''
if not regressionVersion:
regressionVersion = ''
self.modelVersion = modelVersion
self.regressionVersion = regressionVersion
self.compactPreview = compactPreview # This just means not to show/use sections (function names) and subsections (table/figure names?) in output PDF?
self.substitutions = standardSubstitutions
if substitutions is not None: # Can set default set of variable name translations for the entire latex instance.
self.substitutions = substitutions
self.txtNamesUsed = []
self.updateSettings(
codebook=codebook,
mainSurvey=mainSurvey,
mainDataFile=mainDataFile,
recreateCodebook=recreateCodebook)
if filename.endswith('.tex'):
            filename = filename[:-4]
self.fname = filename + ('-' + self.modelVersion) * (
not not self.modelVersion) + ('-' + self.regressionVersion) * (
not not self.regressionVersion)
if texNameSuffix is None:
texNameSuffix = ''
self.fname += texNameSuffix
self.fpathname = defaults['native']['paths'][
'tex'] + self.fname # Native: since we just want to run stata on windows, not tex
#print ' Initiating a latex file %s.tex with margins: %s'%(self.fpathname,margins)
#lfile=open(self.fpathname+'.partial.tex','wt')
thead = texheader(
margins=margins, allow_underscore=allow_underscore).replace(
'%LOT:', '').replace('%LOF:', '')
if not self.compactPreview:
thead = thead.replace('%TOC:', '')
#lfile.write(thead)
#lfile.close()
self.lfileTeXwrapper = [
thead, '\n' + r'\clearpage\newpage\end{landscape}' +
' End\n\\end{document}\n'
]
#self.lfileTeX_original=self.lfileTeXbody+'' # Maybe obselete now, since I have body separate
def updateSettings(self,
codebook=None,
mainSurvey=None,
mainDataFile=None,
recreateCodebook=None):
self.mainSurvey = mainSurvey
self.mainDataFile = mainDataFile
        self.codebook = codebook  # either a filename or a codebook object...
self.recreateCodebook = recreateCodebook
def append(self, string):
self.lfileTeXbody += string
#lfile=open(self.fpathname+'.partial.tex','at')
#lfile.write(string)
#lfile.close()
###########################################################################################
###
def appendRegressionTable(
self,
models,
suppressSE=False,
substitutions=None,
transposed=None,
tableFilePath=None,
tableFormat=None,
sourceLogfile=None
): # tableCaption=None, tableComments=None,modelTeXformat=None,extrarows,
###
#######################################################################################
"""
        This takes estimation results (data) and makes a LaTeX output, appending it to this object's LaTeX body.
Aug 2009: Rewrote this function to use list of model dicts, rather than vectors of various attributes plus a "pairedrows".
        eliminated: colnames, colnums, coefrows, hiderows and landscape, ... rowModelNames
sourceLogfile: Nov 2010: Now it also accepts (should demand?) the filename of the source stata log file. This entire logfile is duplicated, commented out, inside any latex file created.
        June 2011: Needs to be altered to use the new two-formats-in-one-tex-file ability of cpblTableC style. For instance, I could have it so that the automated choice of transposed or not still uses the cpblTableC files, untransposed as the first (default) option, and simply changes the wrapper.
"""
if sourceLogfile is not None:
assert all([sourceLogfile == mm['logFilename'] for mm in models])
if substitutions == None:
substitutions = self.substitutions
if 'version' == 'priorToJune2011': # you can now pass "both" as value for transposed to appendRegressionTable, so that it doesn't duplicate the cpbltablec tex file.
if isinstance(transposed, str) and transposed.lower() == 'both':
self.appendRegressionTable(
models,
suppressSE=suppressSE,
substitutions=substitutions, #modelTeXformat=modelTeXformat,
tableFilePath=tableFilePath,
tableFormat=tableFormat,
sourceLogfile=sourceLogfile, #tableCaption=tableCaption, tableComments=tableComments,
transposed=True, ) #,hideRows=hideRows)
if 1:
self.appendRegressionTable(
models,
suppressSE=suppressSE,
substitutions=substitutions, #modelTeXformat=modelTeXformat,
tableFilePath=tableFilePath,
tableFormat=tableFormat,
sourceLogfile=sourceLogfile, #tableCaption=tableCaption, tableComments=tableComments,
transposed=False) #,hideRows=hideRows)
return
if tableFilePath == None:
tableFilePath = defaults['paths']['tex'] + 'tmpMissingTableName.tex'
if 0:
if tableCaption == None:
tableCaption = '(missing table caption)'
tableCaption += ' ' + tableFilePath.split('/')[-1]
if tableFilePath.endswith('.tex'):
tableFilePath = tableFilePath[:-4]
# Add either the whole logfile, if specified, or a concatenated version of each model output, if no logfile was specified.
# Add all caution comments for this table to the table Caption.?
if sourceLogfile is None:
sourceLogfile = [
LL + '\n'
for LL in sum(
[mm['rawLogfileOutput'].split('\n') for mm in models], [])
]
else:
sourceLogfile = open(sourceLogfile, 'rt').readlines()
twarnings = [
r'\framebox{' + str2latex(LL) + '}' for LL in sourceLogfile
if 'Warning' in LL or 'Caution' in LL or ("CAUTION" in LL and 'di '
not in LL)
]
tableFormat['comments'] += r' ' + r' '.join(twarnings) if len(
twarnings
) < 10 else r' \framebox{Warning!! More than TEN Cautions or Warnings were reported by Stata code for this estimate}'
# Write the tabular or longtable latex file that the master LaTeX will include.
#if not colnums:
# colnums=['' for cn in colnames]
if transposed is None: transposed = 'both'
assert transposed in ['both', True, False]
includedTex, wrapperTex, transposedChoice = composeLaTeXregressionTable(
models,
suppressSE=suppressSE,
substitutions=substitutions,
tableFormat=tableFormat,
transposed=transposed
) #,hideRows=hideRows),modelTeXformat=modelTeXformat,
# {'comments':tableComments,'caption':tableCaption,}
if {
True: 'true',
False: 'false'
}.get(transposedChoice, transposedChoice).lower() in ['true', 'both']:
assert 'BEGIN TRANSPOSED VERSION' in includedTex # File must have two versions of the table if we're to include the second.
if 'version' == "no... i'm changing things june 2011 to always use the same file, and fit both normal andtransposed in it.":
if transposedChoice: # NB: this "transposed" is reset by the call to composeLaTeXtable
tableFilePath = tableFilePath + '-transposed'
fout = open(tableFilePath + '.tex', 'wt', encoding='utf-8')
fout.write(
includedTex + '\n\n%' + '% '.join(sourceLogfile)
) # Append entire Stata log file to end of each LaTeX table file.
fout.close()
# 2010 Jan: Also create a .csv file *from* the .tex.
###from cpblUtilities import cpblTableToCSV
fout = open(tableFilePath + '-tex.csv', 'wt')
fout.write(tableToTSV(includedTex))
fout.close()
print ' Appended table: ' + tableFilePath
###################################################################################
# Add this table as in include in a master LaTeX file that includes all the tables...
# Do landscape, if desired. Decide this separately for each table.
# Following lscape stuff should be moved to texopening/closing.: trash me/it
#lscapeb,lscapee='',''
#if landscape==True or (len(models)>9): # Choose it automatically. if not forced
# lscapeb,lscapee=r'\begin{landscape}',r'\end{landscape}'
self.append(r'\newpage ' + wrapperTex.replace(
'PUT-TABLETEX-FILEPATH-HERE',
tableFilePath.replace(defaults['paths']['tex'], r'\texdocs ')) +
'\n\n')
# Also create a standalone PDF of this table
cpblTable_to_PDF(tableFilePath, aftertabulartex = r' {\footnotesize\cpblColourLegend} ')
if transposed in ['both', True]:
cpblTable_to_PDF(tableFilePath, aftertabulartex = r' {\footnotesize\cpblColourLegend} ', transposed=True)
return
###########################################################################################
###
def old_forPairedRows_appendRegressionTable(
self,
colnames,
colnums,
coefrows,
extrarows,
greycols=None,
suppressSE=False,
substitutions=None,
modelTeXformat=None,
transposed=None,
tableFilePath=None,
tableCaption=None,
tableComments=None,
landscape=False,
rowModelNames=None,
hideRows=None
): # landscape is deprecated: it's chosen automatically.
###
#######################################################################################
# Add this table as in include in a master LaTeX file that includes all the tables...
# Do landscape, if desired. Decide this separately for each table.
# Create the actual latex file that is to be included, as well, through a call to composeLaTeXtable
"""
If rowModelNames is specified, use them for transposed tables only.
"""
if isinstance(transposed, str) and transposed == 'both':
self.old_forPairedRows_appendRegressionTable(
colnames,
colnums,
coefrows,
extrarows,
suppressSE=suppressSE,
substitutions=substitutions,
modelTeXformat=modelTeXformat,
tableFilePath=tableFilePath,
tableCaption=tableCaption,
tableComments=tableComments,
landscape=landscape,
transposed=True,
rowModelNames=rowModelNames,
hideRows=hideRows)
self.old_forPairedRows_appendRegressionTable(
colnames,
colnums,
coefrows,
extrarows,
suppressSE=suppressSE,
substitutions=substitutions,
modelTeXformat=modelTeXformat,
tableFilePath=tableFilePath,
tableCaption=tableCaption,
tableComments=tableComments,
greycols=greycols,
landscape=landscape,
transposed=False,
rowModelNames=rowModelNames,
hideRows=hideRows)
return
if tableFilePath == None:
tableFilePath = defaults['paths']['tex'] + 'tmpMissingTableName.tex'
if tableCaption == None:
tableCaption = '(missing table caption)'
tableCaption += ' ' + tableFilePath.split('/')[-1]
if tableFilePath[-4:] == '.tex':
tableFilePath = tableFilePath[:-4]
# Write the tabular or longtable latex file that the master LaTeX will include.
if not colnums:
colnums = ['' for cn in colnames]
#includedTex,texOpening,texClosing,transposedChoice =
includedTex, wrapperTex, transposedChoice = old_uses_pairedRows_composeLaTeXtable(
colnames,
colnums,
coefrows,
extrarows,
suppressSE=suppressSE,
substitutions=substitutions,
modelTeXformat=modelTeXformat,
caption=tableCaption,
greycols=greycols,
comments=tableComments,
transposed=transposed,
rowModelNames=rowModelNames,
hideRows=hideRows,
landscape=landscape)
if transposedChoice: # NB: this "transposed" is reset by the call to composeLaTeXtable
tableFilePath = tableFilePath + '-transposed'
fout = open(tableFilePath + '.tex', 'wt', encoding='utf-8')
fout.write(includedTex)
fout.close()
debugprint('AppendTable: ', tableFilePath)
###################################################################################
# Add this table as in include in a master LaTeX file that includes all the tables...
# Do landscape, if desired. Decide this separately for each table.
# Following lscape stuff should be moved to texopening/closing.: trash me/it
#lscapeb,lscapee='',''
#if landscape==True or (len(models)>9): # Choose it automatically. if not forced
# lscapeb,lscapee=r'\begin{landscape}',r'\end{landscape}'
self.append(r'\newpage ' + wrapperTex.replace(
'PUT-TABLETEX-FILEPATH-HERE',
tableFilePath.replace(defaults['paths']['tex'], r'\texdocs ')) +
'\n\n')
## if 1: # 18 Marc 2008: seems to be a bug in my use of include, so I am eliminating it for now. #:(
## self.append(wrapperTex''.join([ texOpening, tableFilePath , texClosing]))
## else:
## self.append('\n'.join([ texOpening, includedTex , texClosing]))
return
###########################################################################################
###
def toDict(self,
line,
depvar=None,
regoptions=None,
method=None,
defaultValues=None): # Used to be called regDict
###
#######################################################################################
""" This is a utility to convert from old string list form of a regression model to the newer dict format.
defaultValues is an alternative form for setting fields that are explicitly listed as the other optional parameters.
        Nov 2009: this is completely obsolete, since the old format is no longer allowed. There are new methods and conversions for "do file format" (string) and for "defaultModel" in other... See regTable()...
        May 2010: Actually, not quite obsolete. bySurvey() still uses it ...
"""
# It might be easily identifiable as a list of old-format models:
if isinstance(line, list) and isinstance(line[0], list):
return ([self.toDict(LL) for LL in line])
#if defaultValues==None:
# defaultValues={}
defaultValues = [deepcopy(defaultValues), {}][defaultValues == None]
# Incorporate other keywords into the defaultValues dict:
for optparam, kk in [[depvar, 'depvar'], [regoptions, 'regoptions'],
[method, 'method']]:
if optparam:
                defaultValues[kk] = optparam
# And some fields may be considered mandatory:
if 'flags' not in defaultValues:
defaultValues['flags'] = []
## if ....depvar:
## dd['depvar']=depvar
## if 'regoptions' not in dd and regoptions:
## dd['regoptions']=regoptions
## if method and 'method' not in dd:
## dd['method']=method
if isinstance(line, dict): # It might not need converting:
dd = deepcopy(line)
#if 'flags' not in line:
# line['flags']=[]
else: # It does need converting from old list of strings,etc format:
dd = dict()
# It could be just a string, the really simplest format:
if isinstance(line, str):
line = ['', line]
dd['name'] = line[0]
dd['model'] = line[1]
if len(line) > 2:
# line+=[[]]
dd['flags'] = line[2]
if len(line) > 3:
dd['format'] = line[3]
if len(line) > 4:
dd['code'] = {'before': line[4], 'after': line[5]}
if len(line) > 6:
dd['regoptions'] = line[6]
if len(line) > 7:
dd['feGroup'] = line[7]
# Now fill in any missing values that were specified separately
for kk in defaultValues:
if not kk in dd:
dd[kk] = defaultValues[kk]
return (dd)
###########################################################################################
###
def withCellDummies(self,
lines,
cellVariables,
cellName=None,
nCounts=None,
clusterCells=True,
dropvars=None,
minSampleSize=300,
defaultModel=None): #depvar=None,regoptions=None):
###
#######################################################################################
"""
To test this:
import pystata
L=pystata.latexRegressionFile('tmp1234')
mmm=L.withCellDummies([['','lsatis da_stuff etc if 1',[['survey','EDS']]], ['','lsatis da_stuff etcother if 1',[['survey','GSS17']]]],['DAuid'])
That is, I want to split data up into cells based on certain
variables, for instance, geographic identifiers.
I want to make sure there are at least a certain number of samples in
each cell which have good values for all the regressors in the model,
and eliminate the small cells.
I want to possibly cluster on those cells...
Let this function work on just one line of a model. Then the byCR function can use it (it needs also to drop higher CR level vars), specifying just one CRuid as the cell var...
Algorithm: look for "survey" attribute of the line (model),
which must be in 3 or more element format, so that the third
element is a list of attributes.
Unless specified otherwise, clustering is turned on at the cell level, using the options-specification feature of regseries.
16 April 2008: Adding a Stata "if" statement to condition the regression on successful generation of cell dummies.
[2010: Really?? It looks like this comment is for withCR, not withCell] Ah, what the real problem is is that if the model includes
other conditions ("if"s), they reduce the group size of some,
so I am getting completely determined samples. (see notes for
19 March 2008). Or, some variables may not be available for
all samples, also reducing the number in a CR group. When
forming CR/survey cells, must make sure that all regressors
exist for those samples included. Well... no: only those variables which *ought* to exist for the given survey. This is very kludgey and specific now, but look to see if a survey is selected and if so, restrict the variables used for cell counts to those that we expect should be there.
If surveys are specified as attributes, the cells will be only within those surveys; the surveys restriction can also exist in the if clause of the model description, though.
        Oh dear, I have to create a 5th element in model lines. This will be Stata code which gets written before the regression is done...
ugh.
Aug 2008: I am adding "depvar" as an option: you can specify a default depvar to fill in unfilled field depvar for each model.
Oct 2008: I am adding "regoptions" as an option. Specify it here as a default. Note how valuable this is: since you cannot specify it as a default later, in regtable, since "regoptions" will already be populated by this function, ie with a cluster option (for stata).
        Oct 2008: Eliminated "depvar" and "regoptions" in favour of defaultModel, which can have the former two fields.
"""
if cellName == None:
cellName = 'cells'
from copy import deepcopy
if nCounts == None:
nCounts = 5
modelsout = []
from pprint import pprint
# Check for format of passed lines, and possibly recursively iterate. lines should really be called models, since it now can be a list of models/lists of models.
if not lines or lines == [[]]:
return ([])
if isinstance(lines, dict):
debugprint('Found dict')
lines = [lines]
elif isinstance(lines, list) and all([
isinstance(onee, dict) or
(isinstance(onee, list) and isinstance(onee[0], str))
for onee in lines
]):
debugprint(
'Found list of nothing but ',
len(lines),
' models (in dict or old format); so proceed with standard loop that will treat them as one group'
)
for mm in lines:
mm = self.toDict(mm)
#mm=self.convertMtoDict(mm)
else:
debugprint(
'so must have a list of ',
len(lines),
' lists/modelslists, or else the models are not in dict format?'
)
for lineOrGroup in lines:
debugprint(
'For a linegroup ',
lineOrGroup,
' generated ',
self.withCellDummies(
lineOrGroup,
cellVariables,
cellName=cellName,
nCounts=nCounts,
clusterCells=clusterCells,
dropvars=dropvars,
minSampleSize=minSampleSize,
defaultModel=defaultModel)
) #depvar=depvar,regoptions=regoptions))
modelsout += self.withCellDummies(
lineOrGroup,
cellVariables,
cellName=cellName,
nCounts=nCounts,
clusterCells=clusterCells,
dropvars=dropvars,
minSampleSize=minSampleSize,
defaultModel=defaultModel
) #,depvar=depvar,regoptions=regoptions)
debugprint(' Ended with ', modelsout)
return (modelsout)
# So now we are sure that we have just a list of models (in dict or old form), though the list could be length one.
if not cellVariables: # Facility for degenerate use of this function: e.g. a loop might have a case without any cell variables. Then just return what we got.
return (lines)
oneGroupModels = []
for model in deepcopy(lines):
if 'isManualEntry' in model:
continue
# Option to supply depvar in the function call: Do not override ones already specified.
#if 'depvar' not in model and depvar:
# model['depvar']=depvar
# Option to supply regoptions in the function call: Do not override ones already specified.
#if 'regoptions' not in model and regoptions:
# model['regoptions']=regoptions
if defaultModel:
for field in defaultModel:
if field not in model:
model[field] = deepcopy(defaultModel[field])
stataBeforeOut, stataAfterOut = '', ''
# Find surveys, if there is one. Ignore "all~n" entries for "survey" attribute.
if isinstance(model.get('flags', ''), dict):
surv = dgetget(model, 'flags', 'survey', '')
else:
surv = [
aaa[1] for aaa in model.get('flags', [])
if aaa and isinstance(aaa, list) and aaa[0] == 'survey'
]
surveys, dsurveys = [], ' 1 ' # ie "true" in an if condition
if surv:
surveys = [
sss for sss in surv[0].replace(' ', ',').split(',')
if not 'all~' in sss
]
if len(surveys) > 0:
dsurveys = ' (' + ' | '.join(
['d%s==1' % ss for ss in surveys]) + ') '
# Surely the above is redundant. if the survey selection is already in the if clause. Why do this here?
# Find regressors; find if conditions:
if ' if ' not in model['model']:
model['model'] += ' if 1'
parts = model['model'].split(' if ')
assert len(parts) < 3
regressors = [pp for pp in parts[0].split(' ') if pp]
# Construct appropriate dummy: for this specified set of variables ("cellVariables") and for this survey or these surveys:
# Does not apply to this function.:
# Also, remove any regressors which look like they are at this (or higher...) CR level:
# useRegressors=[rr for rr in regressors if not rr[0:3].lower() in higherLevels[aCR] and not rr[0:4].lower() in higherLevels[aCR]]
useRegressors = regressors
if dropvars == None:
dropvars = ''
dropvars += ' ' + cellVariables
if not dropvars == None:
for dv in uniqueInOrder([
dvv for dvv in dropvars.split(' ')
if dvv and dvv in useRegressors
]):
useRegressors.remove(
dv) #=useRegressors.replace(' '+dv+' ',' ')
# Also, just for the dummy selection, remove any other variables which we do not think exist for any of the chosen surveys:
expectedExistRegressors = ['1'] + [
rr for rr in useRegressors if inanyCodebook(rr, surveys)
]
droppedList = set(expectedExistRegressors) - set(
useRegressors) - set(['1'])
if droppedList:
                print 'Ignoring existence of ', droppedList, ' for ', surveys
# Count how many people in each bin for the coming regression:
stataBeforeOut+="""
capture drop ttt_*
capture drop tttvvv_*
capture drop dummyOne
gen dummyOne=1
""" # Safely clear old counters (Could have used "capture" instead)
if isinstance(cellVariables, list):
cellVariables = ' '.join(cellVariables)
# Following command is equivalent to use egen with group() and then counting??
stataBeforeOut += '\n bysort ' + cellVariables + ': egen ttt_' + cellName + '=count(dummyOne) if ' + parts[
1] + ' & ' + dsurveys + ' & ' + ' & '.join(
['%s<.' % rr for rr in useRegressors])
# Also ensure (if depvar is known) that there is at least some variation in depvar within each cell
if 'depvar' in model:
stataBeforeOut += '\n bysort ' + cellVariables + ': egen tttvvv_' + cellName + '=sd(' + model[
'depvar'] + ') if ' + parts[
1] + ' & ' + dsurveys + ' & ' + ' & '.join(
['%s<.' % rr for rr in useRegressors])
else:
stataBeforeOut += '\n gen tttvvv_' + cellName + '=1'
# 2013 Feb: btw, is it a simpler method to say: by year wp5:egen nSample =total(dummyOne)
# Also must make some dummies myself here for these regions and this survey.
#stataBeforeOut+='\n gen ddd_null=. \n drop ddd_* ' # Safely clear old dummies
stataBeforeOut += """
capture drop ddd_*
egen dcelltmp= group( """ + cellVariables + """)
quietly: tab dcelltmp if ttt_""" + cellName + """ >= """ + str(
nCounts
) + """ & tttvvv_""" + cellName + """>0 & ttt_""" + cellName + """ <. & """ + parts[
1] + """, gen(ddd_""" + cellName + """)
drop dcelltmp
"""
# Condition doing the regressions on success of this dummy generation.
stataBeforeOut += "\n capture noisily confirm numeric variable ddd_" + cellName + "1, exact\n if _rc==0 & r(N)>" + '%d' % minSampleSize + " & r(r)>1 { * Condition on number of respondents, number of clusters, and existence of some " + cellName + " dummies\n"
# If there are not enough samples, create a blank line in eventual output, showing number of samples. (In fact, the line below is safe to the possibility that ddd_cellname could not even be created.: in that case, the number of samples will be all-encompassoing)
stataAfterOut += '\n }\n else { \n reg dummyOne dummyOne \n capture reg dummyOne dummyOne ddd_' + cellName + '* \n } \n matrix est=e(b) \n est\n'
# Wrap up:
# Aghhh. so far must be one survey exactly..
model['model'] = ' ' + ' '.join(
useRegressors) + ' ddd_' + cellName + '* if ' + parts[1]
if isinstance(model.get('flags', ''), dict):
model['flags'][cellName + '~f.e.'] = True
else:
model['flags'] = model.get('flags', []) + [cellName + '~f.e.']
if 'code' not in model:
model['code'] = dict(before='', after='')
assert 'cellDummiesBefore' not in model['code']
model['code']['cellDummiesBefore'] = stataBeforeOut
model['code']['cellDummiesAfter'] = stataAfterOut
if clusterCells:
if 'regoptions' not in model and method not in ['rreg']:
model[
'regoptions'] = ', robust' # Need to have comma, at least here...
if 'cluster(' in model['regoptions']:
print "Warning!! I am not putting the cell variable in as cluster because you already have a cluster variable! ", model[
'regoptions']
else:
model['regoptions'] += ' cluster(%s) ' % cellVariables
if isinstance(model.get('flags', ''), dict):
model['flags'][
'clustering'] = '{\smaller \smaller %s}' % cellName
else:
model['flags'] += [[
'clustering', r'{\smaller \smaller %s}' % cellName
]]
#print ' Revised model: ',model
oneGroupModels += [model]
#modelsout+=[oneGroupModels]
return (oneGroupModels)
###########################################################################################
###
def withCRdummies(self,
models,
CRs,
each=False,
nCounts=None,
clusterCRs=True,
minSampleSize=800,
defaultModel=None,
manualDrops=None): # aka "byCR"
###
#######################################################################################
"""
What does this do? It replicates the given sets of models so that each is run with a series of sets of CR values / dummies included/excluded in order to isolate effects at each level.
This makes use of withCellDummies, above. The only thing that should still be done here is dropping higher CR level vars. and iterating over CRs. [done]
to test this: same as prev function, except:
import pystata
L=pystata.latexRegressionFile('tmp1234')
mmm=L.withCRdummies([convertMtoDict(['','lsatis da_stuff pr_this csd_that if 1',[['survey','EDS']]]), convertMtoDict( ['','lsatis da_stuff pr_this csd_that if 1',[['survey','GSS17']]]), ],['PR','CSD'])
or maybe: (two surveys, three CRs, and four regression models)
mmm=L.withCRdummies([
[convertMtoDict(['','lsatis da_stuff pr_this csd_that if 1',[['survey','EDS']]]), convertMtoDict(['','lsatis da_stuff pr_this csd_that if 1',[['survey','GSS17']]]),],
[convertMtoDict(['','lsatis modelTwothings da_stuff pr_this csd_that if 1',[['survey','EDS']]]), convertMtoDict(['','lsatis otherThings da_stuff pr_this csd_that if 1',[['survey','GSS17']]])],
[convertMtoDict(['','lsatis modelThreethings da_stuff pr_this csd_that if 1',[['survey','EDS']]]), convertMtoDict(['','lsatis otherThings da_stuff pr_this csd_that if 1',[['survey','GSS17']]])],
[convertMtoDict(['','lsatis modelFourthings da_stuff pr_this csd_that if 1',[['survey','EDS']]]), convertMtoDict(['','lsatis otherThings da_stuff pr_this csd_that if 1',[['survey','GSS17']]])],
],['PR','CSD','CT'])
"""
""" This function removes any regressors that look like they
are determined at the same CR level that is being controlled
for, since Stata will randomly choose one of the available
variables at the CR level to leave in, and we want it to be
the dummy.
This is rather specific and therefore probably finicky.
If each=True, the idea is to keep collections of the CR
dummies together, so we see the coefficients grouped by the
rest of the model.
Consider a set of modeles passed as: [1,[2,3,4],[5,6]]. ie there are two sets of grouped ones (which regTable will takes means of) and one ungrouped one.
How do I preserve this ordering?
I want the result for, say, two CRs to be: [1,1',[2,3,4],[2',3',4'],[5,6],[5',6']]
if any of the top level list members is another list of lists (ie list of models), then do a recursive loop to parse them.
Otherwise, parse as a group.
So: this function can now take complex nested sets of models.
Examples: 1 (not allowed); [1]; [1,2,3]; [[1,2,3]]
May 2008: Adding a note of group names for each group of CR models...
        Aug 2008: [Obsolete] I am adding "depvar" as an option: you can specify a default depvar to fill in unfilled field depvar for each model. This is passed on to celldummies.
        Oct 2008: I am replacing "depvar" option with "defaultModel" option!
Aug 2010: Generalising so that if CR doesn't look like a CR, it may be a Gallup geographic level. (or USA, in future?)
manualDrops = ??
"""
from copy import deepcopy
if nCounts == None:
nCounts = 5
if manualDrops is None:
manualDrops = {}
##assert(isinstance(models[0],list))
modelsout = []
from pprint import pprint
###print len(models),len(models[0])#,len(models[1])
#if any([isinstance(model,list) and isinstance(model[0],list) for model in models]):
debugprint('-------------werwerwe', models)
# What object do we have?
if isinstance(models, str):
models = self.str2models(models, defaultModel=defaultModel)
if isinstance(models, dict):
debugprint('Found dict')
elif isinstance(models, list) and all(
[isinstance(onee, dict) for onee in models]):
debugprint('Found list of nothing but dicts')
#elif any([isinstance(model,list) and (isinstance(model[0],list) or isinstance(model[0],dict)) for model in models]):
# debugprint('Found list of lists that are not models')
else:
debugprint(
'so must have a list of lists, or else the models are not in dict format?'
)
# This will fail if models are passed not in dict form.!
if not isinstance(models, dict) and not isinstance(models[0], dict):
debugprint(
'withCRdummies is recursively looping over %d elements\n' %
len(models))
for modelOrGroup in models:
if isinstance(
modelOrGroup,
dict): # or not isinstance(modelOrGroup[0],list):
debugprint(' withCRdummies entry: a single model!\n',
modelOrGroup)
modelOrGroup = [modelOrGroup]
else:
debugprint(' withCRdummies entry: length %d \n' %
len(modelOrGroup))
pass
modelsout += self.withCRdummies(
modelOrGroup,
CRs,
each=each,
nCounts=nCounts,
clusterCRs=clusterCRs,
minSampleSize=minSampleSize,
defaultModel=defaultModel,
manualDrops=manualDrops)
return (modelsout)
"""
if not isinstance(models,dict) and (\
(isinstance(models,list) and isinstance(models[0],dict)) \
or any([isinstance(model,list) and isinstance(model[0],list) for model in models])):
debugprint ('withCRdummies is recursively looping over %d elements\n'%len(models))
for modelOrGroup in models:
if isinstance(modelOrGroup,dict) or not isinstance(modelOrGroup[0],list):
debugprint( ' withCRdummies entry: a single model!\n',modelOrGroup)
modelOrGroup=[modelOrGroup]
else:
debugprint (' withCRdummies entry: length %d \n'%len(modelOrGroup))
pass
modelsout+=self.withCRdummies(modelOrGroup,CRs,each=each,nCounts=nCounts)
return(modelsout)
"""
        # Note: HR cannot be ranked this way, so including HR below is a kludge for testing.
higherLevels = {
'CT': ['CT', 'A15', 'CSD', 'HR', 'CMA', 'PR'],
'CSD': ['CSD', 'HR', 'CMA', 'PR'],
'HR': ['HR', 'CMA', 'PR'],
'CMA': ['CMA', 'PR'],
'PR': ['PR'],
'wp5': ['wp5'],
'subregion': ['subregion', 'wp5'],
}
for aCR in higherLevels: # Add underscores to these; I use them for variable prefixes.
higherLevels[aCR] = [cc.lower() + '_' for cc in higherLevels[aCR]]
if each == True: #(...? huh?)
            #assert 0 # This assert added Sept 2009 since it looks like this is a not-implemented feature? Well, actually I'll leave it for now. Do not understand.
return
# Ensure all models are in the modern format:
models = deepcopy(models)
assert all([isinstance(model, dict)
for model in models]) # Otherwise out of date / impossible
# Treat models as a group; cycle through each of them together before moving on to next CR:
# By this point, single-model sets come looking like a group (ie as [[list]])
dums = {}
global globalGroupCounter
""" This counter / CRgroups label will be the same for a group of models run over different surveys and also with different CR controls. Later on, the *averaged over surveys* version of these can be grouped based on this CRgroups marker to find how to collect coefficients from the means. Note that there is always one model (the first in a group) run without any CR fixed effects. This should also get the CRgroup marker.
"""
globalGroupCounter += 1
if isinstance(CRs, str):
CRs = [CRs]
# Following is a lookup that tells which CR coef is isolated by a given CR dummies set: ie it's one smaller than the set of dummies.
topCoefficient = dict(
[[CRs[iCR], (CRs[1:] + [''])[iCR]] for iCR in range(len(CRs))])
for aCR in CRs:
dummyStr = '%s~f.e.' % aCR
## if aCR=='': # Make one copy with no dummies
## amodel=deepcopy(models)
## amodel['CRgroup']={'CR%03d'%globalGroupCounter:aCR}#+parts[1].replace(' ','')}
## modelsout+=[amodel]
## debugprint ('No change for this model\n')
## continue
debugprint(' byCR: for %s, looping over the %d models.\n' %
(aCR, len(models)))
oneGroupModels = []
# Call the more general function to do most of the work:
#models=self.withCellDummies(deepcopy(models),aCR,cellName=aCR.replace('uid',''),nCounts=nCounts,clusterCells=clusterCRs)
for model in deepcopy(models):
#!stataBeforeOut,stataAfterOut='',''
#print ' byCR: Original model: ',model
# Find surveys, if there is one. Ignore "all~n" entries for "survey" attribute.
#!surv=[aaa[1] for aaa in model[2] if isinstance(aaa,list) and aaa[0]=='survey'][0]
#!surveys=[sss for sss in surv.replace(' ',',').split(',') if not 'all~' in sss]
#print 'Found survey: ',surveys
#if ',' in surv or ' ' in surv:
# print "Multiple surveys in this model"
#!dsurveys=' 1 ' # ie "true" in an if condition
#!if len(surveys)>0:
#! dsurveys=' ('+ ' | '.join(['d%s==1'%ss for ss in surveys]) + ') '
#dums= ' td%s_%s* '%(aCR,surv[0])
# Find regressors; find if conditions:
######model['CRgroup']={'CR%03d'%globalGroupCounter:aCR}#+parts[1].replace(' ','')}
model['CRgroup'] = {
'id': 'CR%03d' % globalGroupCounter,
'fixedeffects': aCR,
'takeCoef': topCoefficient[aCR],
'addend': len(models) > 1
} #,'isaddend':''}}
if aCR == '': # Make one copy with no dummies
if defaultModel:
for field in defaultModel:
if field not in model:
model[field] = deepcopy(defaultModel[field])
debugprint('No change for this model\n')
modelC = deepcopy(model)
else:
if ' if ' not in model['model']:
model['model'] += ' if 1'
parts = model['model'].split(' if ')
assert len(parts) < 3
regressors = [pp for pp in parts[0].split(' ') if pp]
# Also, remove any regressors which look like they are at this (or higher...) CR level:
useRegressors = [
rr for rr in regressors
if not rr[0:3].lower() in higherLevels[aCR] and
not rr[0:4].lower() in higherLevels[aCR] and rr not in
manualDrops.get(aCR, [])
]
model['model'] = ' '.join(
useRegressors) + ' if ' + parts[1]
if aCR in crlist:
acrsuffix = 'uid'
else:
acrsuffix = ''
modelC = self.withCellDummies(
[model],
aCR + acrsuffix,
cellName=aCR,
nCounts=nCounts,
clusterCells=clusterCRs,
minSampleSize=minSampleSize,
defaultModel=defaultModel)[0]
oneGroupModels += [modelC]
modelsout += [oneGroupModels]
#pprint(modelsout)
return (modelsout)
###########################################################################################
###
def removeUnavailableVariables(self,
indepvars,
survey,
forceKeepVars=None,
forceDropVars=None):
# withCRdummies=CRs,withCRdummiesCounts=None):
###
#######################################################################################
"""
********* How does this relate to the fancier removeUnavailableVars, which is wave and country-dependent? I guess this just looks for vars that are completely unknown.
"""
if 1: # I think I must have lost a whole bunch of code right here, since I got an indenting error::
if forceKeepVars == None:
forceKeepVars = []
if forceDropVars == None:
forceDropVars = []
# Clean up in case entire regression command was passed:
indepvarsReg = indepvars.split(' if ')[0] # Get rid of if clauses
indepvarsReg = indepvarsReg.split('[')[0] # Get rid of weights
# Check for this explicity list
unavailable = [
] #[['unemployed',['GSS17','GSS19']],['mastery',['GSS19']],['lnhouseValue',['ESC2','GSS17']],['mortgagePayment',['ESC2','GSS17']],]
removed = []
for una in unavailable:
if survey in una[1]:
indepvars = indepvars.replace(' ' + una[0] + ' ', ' ')
removed += [una[0] + '[%s]' % survey]
# Check using more general check that might be flawed by missing some variables: ie drop too many:
for var in [vv for vv in indepvarsReg.split(' ') if vv]:
if var in forceDropVars or (not inanyCodebook(
var, survey) and not inanyCodebook(
var.replace('lnR', '').replace('ln', ''),
survey) and not isGeneralVar(var) and
not var in forceKeepVars):
indepvars = indepvars.replace(' ' + var + ' ', ' ')
removed += [var + '[%s]' % survey]
return (indepvars, removed)
###########################################################################################
###
def addSeparator(self, models):
###
#######################################################################################
' Simple tool to set the format field of the final model in a possibly-nested list of models so that a separator will appear in the table'
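        # e.g. addSeparator([{'model': '...'}, {'model': '...'}]) leaves the final dict with a
        # 'format' value ending in '|', which marks a column separator in the eventual table.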
if isinstance(models, list):
if models:
self.addSeparator(models[-1])
return
if isinstance(models, dict):
if not 'format' in models:
models['format'] = 'c'
models['format'] += '|'
return
###########################################################################################
###
def flattenModelList(self, nestedModelList, separators=True):
###
#######################################################################################
""" One way to construct a big table with many models is to create a
nested for loop (that is, if looping over several different types of
variation in the model). You can then end up with a multidimensional
list. But regTable requires a 1 or 2 dimension-deep list.
This function flattens a list by one dimension (the highest-level one) and adds a separator at
the end of it.
"""
assert isinstance(nestedModelList, list)
import operator
flattened = reduce(operator.add, nestedModelList, [])
if separators:
self.addSeparator(flattened)
return (flattened)
###########################################################################################
###
def bysurvey(self,
surveys,
model,
includePooled=False,
eliminateMissing=True,
forceKeepVars=None):
# withCRdummies=CRs,withCRdummiesCounts=None):
###
#######################################################################################
"""
This is just a helper tool for making calls to regTable. It does not really need to be in the class.
It copies the model (column in a regression table) for each survey given and removes any RHS variables that are not available for each given survey.
surveys is a list of surveys.
'Model' takes one line suitable for a model list in latex.reg~Table(). I believe it can take either old form (list of strings) or new form (dict) for each model.
You do not need to use this helper tool: you can just put several rows together in a list in regseries, for instance if you want different conditions in each case...
2008 July: If the dependent variable is also a regressor, the latter will be removed. This feature should not really be here. Should be in regTable.. Move it -->O
2008 June: if the dependent variable is not in the survey data, the model will simply be dropped.
2008 March 12:
March 2008: Now including a feature which could later be
informed by the master codebook but is for the moment
hardcoded: it removes unavailable variables from regression
equations.
March 2008: Adding an option to include a column with the
listed surveys all thrown (pooled) together, not to be part of
any average.
It appears to return a list of lists of models. This is so that groups of models that vary only in survey can stay as groups, yet this function may return more than one: one pooled model plus a group of by-survey ones.
"""
from copy import deepcopy
model = deepcopy(model)
if forceKeepVars == None:
forceKeepVars = []
""" Input model can actually now be a list of them. It can also be in different forms, still: """
if isinstance(model, dict):
" Good; this is what it's supposed to be. Proceed"
elif isinstance(model, list) and isinstance(model[0], str):
" This is a simple single model but in old fashioned format. Convert and proceed"
return (self.bysurvey(
surveys,
self.toDict(model),
includePooled=includePooled,
eliminateMissing=eliminateMissing,
forceKeepVars=forceKeepVars))
elif isinstance(model, list) and isinstance(
model[0], dict
): # added late July 2008: dangerous.. this has been working for a while. Why now not?
" This is a list of dicts. Convert each one to a set over surveys."
fafa = self.bysurvey(
surveys,
model[0],
includePooled=includePooled,
eliminateMissing=eliminateMissing,
forceKeepVars=forceKeepVars)
return ([
self.bysurvey(
surveys,
oneDict,
includePooled=includePooled,
eliminateMissing=eliminateMissing,
forceKeepVars=forceKeepVars)[0] for oneDict in model
])
else: #elif isinstance(model,list) and all([isinstance(mm,dict) for mm in model]): # Simple list of dicts
" Some kind of list. Let recursion figure out what."
modelsout = []
for mm in model:
modelsout += self.bysurvey(
surveys,
mm,
includePooled=includePooled,
eliminateMissing=eliminateMissing,
forceKeepVars=forceKeepVars)
return (modelsout)
## elif isinstance(model,list) and not isinstance(model[0],list): # This is a single, list-form model
## model=convertMtoDict(model)
## if isinstance(model,list) and not isinstance(model[0],str): # This is a single, list-form model
## model=convertMtoDict(model)
## if isinstance(model,list) and (isinstance(model[0],dict) or isinstance(model[0],list)): # This is a list of models
## # So iterate recursively over them, and then return
## modelsout=[]
## for mm in model:
## modelsout+=self.bysurvey(surveys,mm,includePooled=includePooled,eliminateMissing=eliminateMissing,forceKeepVars=forceKeepVars)
## return(modelsout)
shortname = defaults['shortnames']
# If model has three elements:
#assert(len(model)==3)
rows = []
removed = []
if isinstance(surveys, str):
surveys = [surveys]
for surv in surveys:
# Skip full parsing of the various ways the third argument can be specified. Assume it's a list of pairs already.
# Check that the dependent variable is available for this survey:
if 'depvar' in model:
if not inanyCodebook(model['depvar'].replace(
'lnR', '').replace('ln', ''),
surv) and not isGeneralVar(
model['depvar']) and not model[
'depvar'] in forceKeepVars:
debugprint(
'Dropping this regression altogether! %s is not in %s'
% (model['depvar'], surv))
continue
if ' if ' in model['model']:
mm = model['model'].replace(' if ', ' if d%s==1 & ' % surv)
else:
mm = model['model'] + ' if d%s==1 ' % surv
forceDropVars = []
if 'depvar' in model:
forceDropVars = model['depvar']
if eliminateMissing:
mm, removeda = self.removeUnavailableVariables(
mm,
surv,
forceKeepVars=forceKeepVars,
forceDropVars=forceDropVars)
removed += removeda
else:
removed = []
newrow = deepcopy(model)
newrow['model'] = mm
if 'flags' not in newrow:
newrow['flags'] = []
newrow['flags'] += [
['survey', surv]
] # This "surv" will be translated later into a shortname version and in small font, but do not do it here, since this model liine may be parsed further.
#rows+=[{'name':model['name'],'model':mm,'flags':model['flags']+[['survey',surv]]} ]
rows += [newrow]
if removed:
debugprint(' bysurvey: Removed: ', ','.join(removed))
if includePooled and len(surveys) > 1:
# Eliminate all variables not available in *all* surveys (!)
conditionAny = ' | '.join(['d%s==1' % surv for surv in surveys])
if ' if ' in model['model']:
mma = model['model'].replace(' if ', ' if (%s) & ' %
conditionAny)
else:
mma = model['model'] + ' if (%s) ' % conditionAny
if eliminateMissing:
for surv in surveys:
mma, removeda = self.removeUnavailableVariables(
mma, surv, forceKeepVars=forceKeepVars)
removed += removeda
else:
removed = []
pooled = deepcopy(model)
pooled['model'] = mma
pooled['flags'] += [['survey', 'all~%d' % len(surveys)]]
return ([pooled, rows])
else:
return ([rows])
def _OBSELETE_DELETEME_readRegressionFile(self,
tableFileName,
useOUTREG2=False):
        assert 0  # This is obsolete as of August 2009. Keep it around for a year. It could be renamed readOutreg2
"""
Read regression results table made by Stata's outreg2 or Stata's est2tex...
As of 2009 August, this is probably obselete, since I am moving to read the log files directly.
"""
import os
tableFilePathNoSuffix = defaults['paths']['tex'] + tableFileName
if not os.path.exists(tableFilePathNoSuffix + '.txt'):
print ' Missing %s... Rerun Stata' % str2latex(tableFileName)
return (r'\newpage{ Missing %s... Rerun Stata}\newpage' %
str2latex(tableFileName))
#self.append (r'\newpage{\huge Missing $%s$... Rerun Stata}\newpage'%tableFileName)
#return(outs)
#print ' Reading '+defaults['paths']['tex']+tableFileName+'.txt --> '+str(tableFileName)+' ['+str(produceOnly)+'] '+str(extraTexFileSuffix)+' in --> '+self.fpathname
if not useOUTREG2 and len(
open(tableFilePathNoSuffix + '.txt', 'rt').readlines()) > 97:
print " Cannot use est2tex with more than 100 output lines. Stupid junk..."
assert 0
# Note the date that this regression was done.
import time, os
regressionDate = time.ctime(
os.path.getmtime(tableFilePathNoSuffix + '.txt'))
#comments+=' Regression carried out on '+regressionDate
# Interim code: figure out whether it's new or old format. Remember, outreg2 is slow, so still use the other for now unless needed.
infileUsedOutreg2 = open(tableFilePathNoSuffix + '.txt',
'rt').readlines()[1][0:9] == 'VARIABLES'
if not useOUTREG2 == infileUsedOutreg2:
print(
'************** OUTREG2 CHOICE DOES NOT MATCH!! SKIPPING THIS TABLE!'
)
print tableFileName + ': outreg2=' + str(useOUTREG2)
return (None) #return(outs)
if not useOUTREG2:
trows = [
line.strip('\n\r').split('\t')[1:]
for line in open(tableFilePathNoSuffix + '.txt', 'rt')
.readlines()
][1:] #Drop first row; drop first column of ech row
"""
1 c0 c1 c2 c3 c4 c5
1 cons 5.853 5.231 3.35 5.084 4.707 1.496
2 (.953) (1.238) (1.604) (.806) (1.524) (1.728)
"""
        else:  # txt file was created by OUTREG2... Deal with its differences
trows = [
line.strip('\n\r').split('\t')[0:]
for line in open(tableFilePathNoSuffix + '.txt', 'rt')
.readlines()
][3:
-1] #Drop first three rows and final ; drop first column of ech row
# For outreg case, change some things so they're compatible with older version:
trows = [tt for tt in trows if not tt[0] == 'R-squared']
for tt in trows:
tt[0] = tt[0].replace('Observations', 'e(N)').replace('_', '-')
            # (The loop above does edit trows in place, since tt references each inner list.)
assert not any([tt[0] == 'R-squared' for tt in trows])
if any([tt[0] in ['e(r2-a)', 'r2_a'] for tt in trows]):
trows = [tt for tt in trows if not tt[0] == 'e(r2)']
assert not any([tt[0] in ['e(r2)', 'r2'] for tt in trows])
# Kludge: strip out z- prefix for beta mode:
for row in trows:
if row[0].startswith('z-'):
row[0] = row[0][2:]
# Strip out HERE ?.?. empty pairs??? (no, it's done later. If things seem buggy, maybe there are >100 variables)
"""
(1) (2) (3) (4)
VARIABLES lifeToday lifeToday lifeToday lifeToday
lnincomeh2 0.326*** 0.309*** 0.362*** 0.389***
(0.0576) (0.0663) (0.0534) (0.0602)
"""
return ({'trows': trows, 'regressionDate': regressionDate})
def str2models(self, statacode, defaultModel=None, before='before'):
""" Oct 2009: allow regTable to accept what is essentially a do-file of regressions......
This started out accepting only
Nov 2009: Now adding defaultModel as a way to add flags...
Dec 2009: add facility for statacode to be a list. some can be dicts (models), or '|', or strings of stata code..
2010 Feb: Added new option 'before' which can be used to specify which type of 'code' element the inter-regression text becomes. For instance, you may very well want to set it to be 'loadData' to make it come before anything else.
2010 March: take new comment: *flag:string at beginning of line sets "string" to True as a flag for the subsequent model.
2011 May: adding "*storeestimates:name" (if name: already given, will just use that. but colon needed no matter what)
I've added various other flag/settings specified starting with *.
"""
if isinstance(statacode, list):
lmodels = []
for mss in statacode:
if isinstance(mss, dict):
self.updateToDefaultModel(mss, defaultModel)
lmodels += [mss]
elif mss == '|':
assert lmodels
self.addSeparator(lmodels)
elif isinstance(mss, str):
lmodels += self.str2models(mss, defaultModel=defaultModel)
return (lmodels)
lines = [LL.strip('\n ') for LL in statacode.split('\n') if LL.strip()]
models = []
precode = ''
moreCodes = {}
extraFields = {} # Some fields can be specified by string.
if not defaultModel:
defaultModel = {}
for aline in lines: #TOdo2015: Integrate use of parseStataComments(txt) here:
if aline in ['|', '*|'
]: # Syntax to put a divider line between two models.
#assert models
self.addSeparator(models)
continue
words = [LL for LL in aline.split(' ') if LL]
method = words[0]
if method in [
'ivregress', 'ivreg'
]: # These take a second word, typically "2sls". ivreg2 does NOT.
method = ' '.join(words[0:2])
words = [method] + words[2:]
if method in [
'svy:reg', 'svy:regress', 'reg', 'areg', 'regress', 'rreg',
'ologit', 'glogit', 'oprobit', 'logit', 'probit', 'xtreg',
'ivregress 2sls', 'ivreg 2sls', 'ivreg2', 'glm'
]:
depvar = words[1]
therest = ' '.join(words[2:])
if '[' in aline:
model, regoptions = therest.split('[')
regoptions = '[' + regoptions
if ',' not in regoptions:
regoptions += ' ,'
elif ',' not in therest:
model, regoptions = therest, ''
else:
model, regoptions = therest.split(',')
if ',' not in regoptions:
regoptions = ', ' + regoptions
if 'robust' not in regoptions and 'vce(' not in regoptions and not method.startswith(
'svy:') and not '[iw=' in regoptions and method not in [
'rreg'
]: # Stata says can't do robust if iw?
regoptions += ' robust ' # Safety... I do not think it ever hurts?
toaddmodel = deepcopy(defaultModel)
assert before not in moreCodes
toaddmodel.update({
'model': model,
'depvar': depvar,
'method': method,
'regoptions': regoptions,
'code': {
before: precode,
'after': '',
}
})
toaddmodel.update(extraFields)
toaddmodel['code'].update(moreCodes)
models += [toaddmodel]
precode = ''
loaddata = ''
moreCodes = {}
extraFields = {}
#elif aline=='*|':
# self.addSeparator(models)# models+='|'
# TO DO!!!! This section should use parseStataComments(txt) instead.
elif method in ['gzuse', 'use']:
if 'loadData' not in moreCodes:
moreCodes['loadData'] = ''
moreCodes['loadData'] += aline + '\n'
elif aline.startswith(
'*name:'):  # Syntax to set the name of the next model
precode += aline + '\n'
extraFields['name'] = ':'.join(aline.split(':')[1:])
elif aline.startswith(
'*storeestimates:'
): # Syntax to use Stata's "estimates store" after the regression [May 2011]
precode += aline + '\n'
sname = aline.split(':')[1]
if not sname and 'name' in extraFields:
sname = ''.join([
cc for cc in extraFields['name']
if cc.isalpha() or cc.isdigit()
])
assert sname
assert not dgetget(defaultModel, ['code', 'testsAfter'], '')
moreCodes['testsAfter'] = """
estimates store """ + sname + """
"""
extraFields['stataStoredName'] = sname
elif aline.startswith(
'*autoExcludeVars:'
):  # Syntax to allow a variable which is missing for this entire estimation sample to be filled in rather than killing the regression (see autoExcludeVars in regTable).
extraFields['autoExcludeVars'] = aline.split(':')[1]
# To do: Following feature started to be implemented July 2015. Erase this when it's done.
elif aline.lower().startswith(
'*groupname:'
): # Syntax to allow, in non-transposed mode, another title row labeling individual or groups (if they're adjacent) of columns. The "*name:" parameter is still shown, in another row below.
extraFields['modelGroupName'] = aline.split(':')[1]
elif aline.startswith(
'*meanGroupName:'
): # Syntax to allow grouping of estimates for calculating group mean coefficients
extraFields['meanGroupName'] = aline.split(':')[1]
elif aline.startswith(
'*flag:'): # Syntax to add a flag to next model
precode += aline + '\n'
aflag = aline[6:]
extraFields['flags'] = extraFields.get('flags', {})
if '=' in aflag:
extraFields['flags'][aflag.split('=')[0]] = aflag.split(
'=')[1]
else:
extraFields['flags'][aflag] = 1
elif aline.startswith(
'*flags:'): # Syntax to add a flag to next model
# Example with three flags: *flags:CR=foo:thisone=yes:robust
# Note that this means a flag value cannot contain a colon. So use "*flags:" to set several flags at once (none of whose values may contain a colon), and "*flag:" to set a single flag.
for aflag in aline.split(':')[1:]:
extraFields['flags'] = extraFields.get('flags', {})
if '=' in aflag:
extraFields['flags'][aflag.split('=')[
0]] = aflag.split('=')[1]
else:
extraFields['flags'][aflag] = 1
elif aline.startswith(
'*compDiffBy:'
): # Syntax to invoke an extra line of compensating differentials
precode += aline + '\n'
assert len(aline.split(':')) == 2
assert ' ' not in aline.split(':')[0]
extraFields['compDiffBy'] = aline.split(':')[1]
else:
debugprint(
'str2models: assuming line starting with "%s" is NOT a regression command!!!'
% method)
#precode+='* str2models: assuming line starting with "%s" is NOT a regression command!!!\n'%method
precode += aline + '\n'
assert not precode # If it ends with code... I guess this could be put in "post" of the last model.
return (models)
###########################################################################################
###
def updateToDefaultModel(self, models, defaultModel):
###
#######################################################################################
# The following (Sept 2008) makes redundant a whole bunch of other flags, like the "depvar" to follow, within regTable. In Dec 2009, I moved this section of code here so that str2models could also use it.
"""
Fill in missing fields of each model dict from defaultModel, without overwriting anything already set ('flags' are merged rather than overwritten).
Have I overlooked this since??
Do I need it to treat 'code' like 'flags'?
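A rough sketch of the intended behaviour (values are hypothetical):
    defaultModel = {'method': 'svy:reg', 'depvar': 'lifeToday',
                    'flags': {'survey': 'GSS17'}}
    self.updateToDefaultModel(models, defaultModel)
    # Fields absent from a model are copied in from defaultModel; 'flags' are
    # merged (defaults may add flags but not overwrite existing ones); a clash
    # on any other already-set field trips the assertion below.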
"""
if isinstance(models, dict):
models = [models]
if 1:
for amodel in models:
for field in defaultModel:
assert (
field not in amodel or field == 'flags' or
amodel[field] == defaultModel[field]
) # I am attempting to overwrite something. This is dangerous. Except that I allow extra "flags" to be specified this way.
if field == 'flags' and field in amodel:
defflags = parseFlags(defaultModel['flags'])
modelflags = parseFlags(amodel['flags'])
for ff in defflags:
assert ff not in modelflags # You can use defaultModel to add extra flags, but not to overwrite existing flags.
modelflags[ff] = defflags[ff]
amodel['flags'] = modelflags
if field not in amodel:
amodel[field] = deepcopy(defaultModel[field])
return (
) # I do not think it is necessary to return anything... The above should be written not to overwrite pointers.
###########################################################################################
###
def generateLongTableName(self, tablename, skipStata=False):
###
#######################################################################################
# 2009 Sept: Following section seems obsolete now that I am not using est2tex. Let's keep the short prefix anyway, but no longer truncate the rest of the filename. I'll do this just by changing the name-length parameter from 25 to 100.
# Following section gives a semi-unique name for the table. If the assert below ever catches, I should just change the code to rename the output file when it's done (est2tex required the file to have the same, length-limited name as the matrix...). The output .txt file then gets renamed to tablenamelongform when done.
aa = 'ABCDEFGHIJKLMNOPQRSTUVWXZY'
maxnamelength = 100
tablePrefixes = [a for a in aa]
for a in aa:
for b in aa:
tablePrefixes += [a + b + c for c in aa]
# Make a short (one- or three-letter) prefix which depends on the exact *full* name, which will then be truncated (note: ^ below is bitwise XOR, not exponentiation):
pref = tablePrefixes[sum([ord(a) ^ 2
for a in tablename]) % len(tablePrefixes)]
tablenamel = pref + '-' + ''.join(
[c for c in tablename if c not in ' ():;"|-'
])[0:(maxnamelength - 3 - 1 - len(self.modelVersion) - len(
self.regressionVersion)
)] + '-' + self.modelVersion + self.regressionVersion
tablenamelongform = ''.join(
[c for c in tablename if c not in '():;"|']).replace(
' ', '-').replace(
'_',
'-') + '-' + self.modelVersion + self.regressionVersion
assert tablenamel not in self.txtNamesUsed or skipStata == True # If this occurs, you probably need to make sure you are reloading pystata each time you run your program. Aug 2010: no longer: I now reset it on each instance, so this shouldn't happen.
self.txtNamesUsed += [tablenamel]
# Put a pointer in all models to let them know what table they're in (why? e.g. for subSampleAnalysis)
return (tablenamel)
###########################################################################################
###
def suestTests(self,
twomodels,
skipStata=False,
tablename=None,
modelSuffix=None):
### WaldCompareTwoModels
#######################################################################################
""" May 2011:
use '*storeestimates' in str2models to store previous two models in Stata.
Then call this to add a new model which will consist just of a bunch of tests: on each common coefficient and on all at once (Chow test)
This should return a model to add to a list of models...
twomodels (could be more?) should be able to be existing model dicts.
If tablename is given, it will ... no...
Okay, to make this work you need to use svy:reg. Otherwise it gets upset about using pweights OR clustering...
modelSuffix: beats the hell out of me. Stata is inconsistent. Kludge. ah!
(why wouldn't stataStoredName just be the same as stataModelName, which already exists?)
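A rough usage sketch (assuming `latex` is an instance of this class; variable and model names are hypothetical):
    mods = latex.str2models('''
    *name:men
    *storeestimates:men
    svy:reg lifeToday lnincomeh2 age if male==1
    *name:women
    *storeestimates:women
    svy:reg lifeToday lnincomeh2 age if male==0
    ''')
    mods += [latex.suestTests(mods[-2:])]
The appended pseudo-model reports, for each common coefficient and for all of them jointly, the p-value for equality across the two stored estimates.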
"""
snames = [
twomodels[0]['stataStoredName'], twomodels[1]['stataStoredName']
]
m1, m2 = twomodels
assert snames[0]
assert snames[1]
assert 'cluster' not in twomodels[0].get('regoptions', '')
assert 'cluster' not in twomodels[1].get('regoptions', '')
# Find common variables. It would be nicest to do this from estcoefs, but that's maybe not available.
vlists = [[
mm for mm in twomodels[0]['model'].split(' if ')[0].split(' ')
if mm not in ['cons']
], [
mm for mm in twomodels[1]['model'].split(' if ')[0].split(' ')
if mm not in ['cons']
]]
depvars = [vlists[0][0], vlists[1][0]]
assert depvars[0] == depvars[1]
# Find common elements:
regressors = list(set(vlists[0][0:]) & set(vlists[1][0:]))
regressorsNoStars = [vv for vv in regressors if '*' not in vv]
if modelSuffix is None:
modelSuffix = ''
if isinstance(modelSuffix, str):
modelSuffix = [modelSuffix] * 2
assert isinstance(modelSuffix, list) and len(modelSuffix) == 2
##suestTest() in pystata??
statacode = ("""
*BEGIN SUEST TESTS TWOMODELS
suest %(sn0)s %(sn1)s
""" + '\n'.join([
"""
*CPBLWaldTest:""" + vv + """
test [%(sn0)s%(sfx1)s]""" + vv + """ = [%(sn1)s%(sfx2)s]""" + vv + """
""" for vv in regressorsNoStars
]) + """
*CPBLChowTest:
test [%(sn0)s%(sfx1)s = %(sn1)s%(sfx2)s]
*estimates drop
*END SUEST TESTS TWOMODELS
""") % {
'sn0': snames[0],
'sn1': snames[1],
'vvs': ' '.join(regressors),
'sfx1': modelSuffix[0],
'sfx2': modelSuffix[1]
}
commonFlags = dict([[a, b] for a, b in m1.get('flags', {}).items()
if a in m2.get('flags', {}) and b == m2['flags'][a]
])
assert not any(['ncome' in mm['depvar'] for mm in twomodels])
print ' Creating SUEST TESTs for models %s and %s ' % (m1['name'],
m2['name'])
return (dict(
special='suestTests',
name=r'$p$(equal)',
flags=commonFlags,
code={'after': statacode},
method='suest',
model=' '.join(regressors),
depvar=depvars[0]))
def duplicateAllModelsToDualBeta(self, models):
"""
Add a normalized (beta) version of each model immediately following it, but only if it's OLS or xtreg. (For xtreg that's a slightly odd thing to do: it amounts to normalizing the underlying variables before taking first differences, etc.)
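Rough usage sketch (assuming `latex` is an instance of this class; someStataCode is hypothetical):
    models = latex.str2models(someStataCode)
    models = latex.duplicateAllModelsToDualBeta(models)
    # Each OLS/xtreg model is now followed by a copy with ' beta' appended to its
    # regoptions, which regTable renders as standardized (beta) coefficients.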
"""
from copy import deepcopy
if isinstance(models, basestring):
models = self.str2models(models)
for imm in range(len(models))[::-1]:
newm = deepcopy(models[imm])
assert isinstance(newm, dict)
assert 'beta' not in newm['regoptions']
if newm['method'] in [
'svy:reg', 'svy:regress', 'reg', 'regress', 'rreg', 'xtreg'
]:
# For method in [rreg,xtreg], this "beta" will need to be removed later.
newm['regoptions'] += ' beta'
models.insert(imm + 1, newm)
return (models)
###########################################################################################
###
def regTable(
self,
tablename,
models,
method=None,
depvar=None,
regoptions=None,
variableOrder=None,
showonlyvars=None,
hidevars=None,
forceShowVars=None,
extralines=None,
comments='',
substitutions=None,
options='',
attributes=None,
landscape=False,
transposed=None,
combineRows=None,
suppressSE=False,
produceOnly=None,
extraTexFileSuffix=None,
doPcorr=False,
stopForErrors=True,
crcoefsVars=None,
skipStata=False,
skipLaTeX=False,
hidePSumTest=False,
defaultModel=None,
hideModelNumbers=False,
assignSaveCoefficientsPrefix=None,
hideModels=None,
showModels=None,
hideModelNames=False,
renumberModels=True,
showFailedRegressions=False,
multirowLabels=False,
betas=False,
followupFcn=None,
followupArgs=None,
showCompDiff=None,
returnModels=False,
postPlotFcn=None,
postPlotArgs=None,
autoCreateVars=True,
captureNoObservations=None,
skipReadingResults=False
): # Do a set of regressions; output results to .tex and .txt files
# retired options: useOUTREG2=False,
###
#######################################################################################
if stopForErrors == False:
print(
'******************* WARNING!!!!!!! WHY WOULD YOU USE STOPFORERRORS=FALSE??? THIS SUPPRESSES ALL OUTPUT, INCLUDING ERRORS AND WARNINGS, FROM REGRESSIONS! *********** Use the autoExcludeVars flag in the model struct instead!!! April 2010: No, use replaceFailsWithDummy. No, use the object-level or table-level captureNoObservations'
)  # Well, it should really be called dummiesForEmptyRegressions: capture the reg and then, if _rc==2000 (or 2001, insufficient obs), run the dummy regression instead.
1 / 0
"""
This is a core part of the stata LaTeX class. It generates Stata code to perform a series of regressions and it manages production of LaTeX tables in various formats to present the results from Stata (the guts of the LaTeX code generation are in a separate function).
Among its many features / abilities are:
- can display the coefficients of two variables on the same line. e.g. if different models use real or nominal income, these could for compactness both be displayed on one line as income.
- the order of displayed variables can be specified
- the order of any housekeeping information rows (e.g. R^2, N, etc) can be specified
- extra lines can be added to the table to be filled in by hand
- comments can be placed in the table caption
- more readable variable descriptions (including LaTeX code) can be substituted for the raw variable names
- columns (rows) denoting characteristics of the different models in the table can be generated (attributes=). These could show checkmarks or X's or other words. I do not understand what this parameter does. So I am deprecating it (Aug 2009)
- landscape or standard orientation may be specified or automatically chosen
- transposed or standard layout for the table (ie models as columns or rows) may be specified or automatically chosen
- standard errors can be shown or suppressed.
- P-values are calculated from standard errors and significance can be shown with stars or with coloured highlighting. This choice can be changed later (at any time) simply for an entire table through setting a LaTeX switch.
- When a similar model is run separately on each of several datasets (surveys), the resulting coefficients can be averaged over the different datasets. Output tables can be displayed in several modes: with or without the averages shown; with only averages shown, etc.
- dividing lines can be specified to separate groups of regressions or groups of covariates.
- can include special code to be run before or after each regression.
- as a very specialised feature, it can optimally combine a series of models which have a "drilling-down" series of spatial dummies. That is, I may run a model with geographic dummies at province level, then with metro level dummies, then city level dummies, then CT level, etc. This function can combine those into sensible inferred coefficients for an income effect at each level. This is obviously not a generally useful application, but it's built in here.
The set of models to run can be specified in two formats (see regTableOldForm). The modern one is as a list of Python dicts, with fields as specified below.
The argument regoptions includes code that should come after the variables to regress: typically a weight specification, possibly followed by a comma and some Stata options, e.g. "cluster", "beta".
N.B. if the "beta" option is given to Stata in a reg command, it is intercepted here, all the variables are normalised, and a robust weighted regression is done to create beta coefficients in place of the normal regression output. This results in the same coefficients as Stata produces, but allows full standard error calculation, etc.
betas: Alternatively, an entire table can be turned into betas (if it is OLS) by giving the betas=True option. If you want both raw and beta versions of each model, use the function duplicateAllModelsToDualBeta()
The argument "models" is a list of [dicts and lists of dicts]. Dicts have the following tags (and maybe more):
'name': model name,
If left blank, the dependent variable will be used for column headings in normal layout.
In transposed layout, the row model label (rowmodelname) will be blank unless this value is specified.
There's a "hideModelNames" switch, though not reimplemented yet may 2011: doing it now.
'model': variables and ifs,
'flags': dummy flags and condition names,
Some examples:
['controls'] -> gets a check in controls field
[['controls','some'] -> gets "some" in controls field
[['controls',0]] -> gets an "x" in controls field
[['controls',0],'homeowners~only']
(Actually, I think you can now send flags as a dict. -- nov2009)
'format': column format characters,
'code': with subfields: 'before', 'after', and others: Stata code to run before regression, Stata code to run after regression, but before exporting the results to a textfile with est2vec (now outreg2) (no, now raw log files). Subfield 'afterExport' can be used to run something else after the regression and export is complete. Subfield 'loadData' can be used to run code absolutely before anything else. So, keep track of rules regarding order here: ['loadData' < 'before' < 'afterExport'] and ['after',testsAfter,sumsAfter,existenceConditionAfter' (order of these?)] NEED TO INSERT HERE THE VARIOUS EXTRA STUFF THAT GETS ADDED: cell dummy code; beta code; etc, etc.
model['code']['cellDummiesBefore']+=stataBeforeOut
model['code']['cellDummiesAfter']+=stataAfterOut
'regoptions': Extra options to include in the regression command options (for instance, clustering).
'feGroup': contains the name/number of a collection of models to which progressively restrictive dummies have been added; the group should be analysed together to extract coefficients at each level. Actually, there can be whatever properties like this I want. I'm using "CRgroup"
'getSubSampleSums': a list of conditions, to be combined with "if e(sample)", for which to calculate sums of subsamples of the samples used in the estimation. These will get read from the Stata log file and be incorporated into the model dict as 'subsums' element, which can then be acted on by a followupFcn.
aspects like "depvar", "regoptions", "reg" can also be incorporated.
For now, there is a helper function above, to aid in the transition by translating to the dict form.
'compDiffBy'= incomeVariable : this will solicit the covariance matrix from the regression and calculate compensating differentials for all variables except the one supplied. (Aug 2009)
'compDiffVars'= list or string list of variables for which comp diffs should be calculated and/or displayed. Note that regTable has a table-level parameter "showCompDiff" which decides whether/how comp diffs should be shown.
'autoExcludeVars'=None: This says to fill in values (to -999) for all RHS variables which are missing for all records matching the regression's "if" condition. If a string is given instead of a None value, then that string will be used as the if condition to fill in values.
#'skipNumber'=False: If set to true, this will not increment the latex-displayed number for this model. So This is supposed to be useful if I have a standardized beta version of a raw coefficients equation following the raw equation. I may want the number to indicate equation number, not estimate number...
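As a rough illustration, a single model dict under these conventions might look like the following (the variable names and values are hypothetical):
    {'name':       'OLS baseline',
     'method':     'svy:reg',
     'depvar':     'lifeToday',
     'model':      'lnincomeh2 age educ if agegroup>1',
     'regoptions': ', cluster(region)',
     'flags':      {'controls': 1, 'survey': 'GSS17'},
     'code':       {'before': '', 'after': ''},
    }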
The list models is passed here with nested structure (list of lists) but is flattened at the beginning. The grouping (for taking means over multiple surveys) is recorded in combineColumns. Also, for those sets of groups which will all be combined based on CR dummies, the grouping is recorded in dict fields "CRgroup".
## This replaces the old form, in which each models was a list with six elements:
## model:
## [0]: model name,
## If left blank, the dependent variable will be used for column headings in normal layout.
## In transposed layout, the row model label (rowmodelname) will be blank unless this value is specified.
## [1]: variables and ifs,
## [2]: dummy flags and condition names,
## Some examples:
## ['controls'] -> gets a check in controls field
## [['controls','some'] -> gets "some" in controls field
## [['controls',0]] -> gets an "x" in controls field
## [['controls',0],'homeowners~only']
## [3]: column format characters,
## [4]: Stata code to run before regression,
## [5]: Stata code to run after regression
## [6]: Extra options to include in the regression command options (for instance, clustering).
## [7]: Extra properties, in the form of a dict. This is used to mark groups of models for CR fixed effects, for example: The dict will have an element {
## )
## I should *really* change the above list to a dict, so that each element can be there or not. Property 7 can grow to subsume all the others, for example.
## New format: (use convertMtoDict() to switch to new format)
The argument "produceOnly=" can specify a mode to restrict the number of output tables. Right now, several tables are created when there are models which are summed over.
produceOnly='onlyraw' will make just one table per call: just the raw regressions.
produceOnly='means' will make just two tables per call: one with everything and one with just means.
produceOnly='withmeans' will produce just one table which shows everything.
produceOnly='crc' makes just the table of CR dummy coefficients on income (What happened to onlyCRC and withCRC???)
produceOnly='justmeans' does just means only.
The argument extralines is mostly used by other functions, rather than specifying by hand.. [jul2008].
crcoefsVars is an option to fine-tune the most exotic/specialised/obscure feature of this program, which is extracting cofficients from a series of regressions with different spatial dummies.
hideModels is a real low-level kludge which turns off post-Stata processing and LaTeX display of a subset of models. They are listed by their sequential index in the (otherwise) output, starting from 1. So between this and hidevars, one can simplify tables of disclosed results (ie I cannot rerun Stata once results are released from Statistics Canada, so I have to work with the output as is.)
7 April: Now a 6th element is code after the regression. For instance, to do a statistical test and add the results to r().
For the moment, I will just hardcode in here what I want it to do... the behaviour can be taken out of regseries and put into the 6th entry later.
4 April 2008: I've hardcoded "transposed=True" right now, but transposed='both' is default.
19 March 2008: a 5th element now exists: this is stata code to run before doing the regression..
# March 2008: Now a new 4th element of a model entry corresponds to either '' or '|'. The latter puts a vertical line after that column. It can be passed as a row by itself (['|']) in the list of models. 2009 October: reinstated parsing '|' in list of models.
# March 2008: move hidevars functionality from stata to the reformatting. ie "showvars" can be used to pass extra things to stata, but only a subset of those (hidevars removed) will end up in python's latex output. Previously the stata est2vec call was in charge of removing hidevars.
# July 2008: The above March 2008 "showvars" is a really bad idea. I've renamed "showvars" to "variableOrder" and created another option, showonlyvars. [Note also: class-setting: self.variableOrder and module default defaultVariableOrder]. So the parameters
- variableOrder (specify ordering in LaTeX of some of the first covariates, if they exist in the regression. It can also specify the order of flags/dummies listed, etc...... [latter part not done yet jul2008]),
- showonlyvars (specify order and exact set of covariates in LaTeX; this option is risky since it's not explicit what is being suppressed), and
- hidevars [used to be called noshowvars] (suppress certain covariates or extrarows/stats info in the output)
- forceshowvars protects variables and properties from getting hidden due to being empty in all displayed models.
all apply to the generation of LaTeX output, *not* to what gets given to Stata. If you want to force extra lines (blank covariates) into the LaTeX output, you can use extralines (?).
regTable now also produces a latex version with just the non-addend and non-mean columns.
# 2008 Feb 29: Version 2 is born: this uses a text output from Stata (without any significance info) rather than the latex output. The LaTeX-sourced version is still available outside this class.
#
# Feb 2008: incorporated from standalone to a member of this statalatex class. This is so that it can use the object's texfilename when outputting stuff.
#
suppressSE=False says to include standard errors on every second line.
# If an element of the models array is an array rather than a string, then it is in the form ["name","dependentvars"] rather than just "dependentvars". The "name" is a column name.
# Other possible formats: ["", "dependentvars", ["booleanAttribute"]] or ["", "dependentvars", [["attribute","value"],["otherattribute","anothervalue"]]] ,...
# reg, depvar,regoptions must be scalar or have the same number of values as models:
#
# combineRows is a list of pairs or sets of variable names to combine into one (so both variables must not exist in the same model/column.), leaving/using the first name listed.
For instance, when I have both real and nominal incomes, I might want the coefficients to appear on the same row, and just have the difference signified with an indicator row later.
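For example, combineRows=[['lnHHincome', 'lnRHHincome']] (a hypothetical pairing) would display whichever of the two coefficients each model has on a single row, labelled with the first name.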
Columns can also be combined, which means something totally different. This takes a mean of each coefficient over several models (!). Typically, these models are identical except that they are run on different, similar surveys.
To initiate this feature, the simplest way is to group those rows in the model list together as one entry (ie a list of normal-looking rows).
Each row must be in full format, ie three elements (title, regressors, flags).
ie you can give a list of rows as one element in the list. Then these will be aggregated after separate regressions are done.
old description of this:
CombineColumns allows this routine to call the aggregateColumns method when it's done. This produces alternate versions with some columns averaged together. Useful for finding average results from similar regressions over multiple surveys.
Actually, a better way than using combineColumns is to put an extra element in a model line; the fourth element, if present, indicates a group name for aggregation.
September 2008:
Values for method,depvar,regoptions can be specified for all the models at once using EITHER the individual optional parameters by the same name, or by putting those fields into defaultModel, which is a collection of default model features. The field 'flags' is an exception, in that it can be specified in both ways at once, as long as its member fields do not overlap.
assignSaveCoefficients='b_' would mean that estimated coefficients are saved to the samples used to estimate them. These will be named b_var where "var" is the name of the RHS covariate.
Only one of "hideModels" and "showModels" can be used. Each take a list of model numbers (as shown on a simple raw only table) and only display those ones.
If future versions, one will be able to provide names (list of strings) rather than 1-based indices, or other identifying features (dict, with fields identifying the model feature names and values listing the feature values.)
"hideModelNames" can be used to avoid showing the words associated with a particular model row/column. This is mostly useful to get rid of the ones that are automatically made in the various fancy functions (sums, CRC collection) etc.
Oct 2008: renumberModels: when a subset of models is selected, renumber from (1). Sept 2009: this changed to default True from default False.
Dec 2008: In LaTeX output, failed models (ie/eg with r^2 or equivalent 0 or 1) are not shown unless "showFailedRegressions" is set to True.
Aug 2009: Completed betas=True mode
Aug 2009: followupFcn=f(thisLatexClass,models,followupArgs),followupArgs={'arg1':val1, ...etc). These are to REPLACE postplotfcn etc, which rely strangely on pairedRows etc. More generally, this can do anything it wants. It is a way to extend functionality of regTables, obviously. It is called (and some information maybe addedto followupArgs) after the estimation results have been read in. followupFcn can also be a list of function pointers if you want to do more than one, in sequence. But they all get the same followupArgs.
Aug 2009: showCompDiff= boolean (or could be "only", "aftereach" "atend") which decide whether comp diffs should be shown in the table. "only" would hide everything else. "aftereach" would intersperse comp diff cols with regressions coef cosl; "atEnd" would put them in a separate group at end of the same table. If it's not specified, but some comp diffs are asked for at the model level, then this should default to something. Say, "aftereach". Same if showCompDiff=True.
Oct 2009: returnModels= boolean : If set to True, then the models list of dicts that was passed will be updated to include all estimtes and modifications etc. This is useful if you want to use the results to make some more plots or etc (an alternative to followupFcn, I suppose).
Nov 2009: autoCreateVars=True/False: This will do a "capture gen var=0 " on all variable names to ensure that some specification incompatibility does not stop the regression from running. This makes it easier to run the same specification on multiple surveys. See by contrast/also, the model dict element "autoExcludeVars" which will do a similar thing (but only temporarily) for variables which are 100% missing just for the regression equation sample. (see above)
skipStata can be True, False, or 'noupdate'. The latter means only run Stata for a table if it does not already exist. Oh,wait. No. I take it back. I am killing this noupdate option now.
April 2010: captureNoObservations: allows regressions to be called which have no observations. It replaces such with a _dummyOne regression.
May 2011: I need to split this up into a display portion and a Stata portion... Or else make an option to skip reading from log files, ie to process model dicts which may be modified for kludging, and not to regenerate results from Stata output.... hmmm. For now I'm just making a skipReadingResults option.
Aug 2011: New field of model struct: "isManualEntry". If you want to add/insert a custom row with minimal (no!) processing, set this to True.
Aug 2012: Now deals with regressors of form "i.var" (and even i.var#var2 etc) as follows: assume we're not interested in the coefficients. Therefore, (1) hide it from estimate results (since estimates table doesn't use the variable names for such dummies) but also, let's not create it if it doesn't exist (yet), because we want regressions to fail if not-shown variables are mistakenly absent.
Limitations:
It is only specifically savvy about regress (OLS), logit and ologit at the moment.
Should not be too hard to convert to using R, since the stata-specific parts are fairly well circumscribed or functionalised (e.g. reading Stata text output directly to get results, ugh.)
Bugs:
?
"""
if captureNoObservations == None:
captureNoObservations = self.captureNoObservations
if captureNoObservations == None: # Dec 2010: new default, since it's always an advantage.
captureNoObservations = True
if '_' in tablename: tablename = tablename.replace('_', '-')
if skipStata == True and self.skipAllDerivedTables == True:
print " REGTABLE: !! Skipping an entire table " + str(
(tablename, extraTexFileSuffix)
) + " because it is a derived table, not one that controls Stata. Set the latex.skipAllDerivedTables==False to correct this special behavoiure"
return ('')
assert not skipStata or not skipLaTeX # (really?)
if self.skipAllDerivedTables == True:
skipLaTeX = False
#
#print 'depvar:',depvar
#print 'models:' ,models
#print 'len models: ',len(models)
#print 'showvars:',showvars
import os
#from cpblUtilities import unique
#################### MEMBER / UTILITY FUNCTIONS FOR REGTABLE():
def parseFlags(flags):
"""Take the 'flags' element of a model dict and turn it into a dict. Return the dict. The dict is less compact, maybe, than, e.g. a list of things that are just "turned on", ie "Yes".
"""
if not flags:
return ({})
if isinstance(flags, dict):
for kk in flags:
if flags[kk] in ['yes', 'true', 'True', True]:
flags[kk] = r'\YesMark'
if flags[kk] in ['no', 'false', 'False', False]:
flags[kk] = r'\NoMark'
return (deepcopy(flags))
if isinstance(flags, str):
dictFlags = {flags: r'\YesMark'}
elif isinstance(flags,
list): # model[2] must be a list if not a str.
dictFlags = [[atts,r'\YesMark'] for atts in flags if isinstance(atts,str)] \
+ [atts for atts in flags if isinstance(atts,list) and isinstance(atts[1],str)] \
+ [[atts[0],r'\YesMark'] for atts in flags if isinstance(atts,list) and isinstance(atts[1],int) and atts[1]==1] \
+ [[atts[0],r'\NoMark'] for atts in flags if isinstance(atts,list) and isinstance(atts[1],int) and atts[1]==0]
return (dict(deepcopy(dictFlags)))
assert attributes == None # Deprecating this.
DO_NOT_DEEPCOPY = True  # Warning!!! April 2010: I am changing things so that it's up to the caller to do a deepcopy before passing models to regTable. This is a good idea if some of the elements of the models might point to common objects, and these objects may be modified per model. But I need to be able to pass things like showModels and hideModels as pointers to elements of the original models list, so doing deepcopy messes things up. Hey, maybe I could test for redundancy by checking the memory length of the original and a deepcopy of it? If different, there were some common pointers... [?Not done yet]
if DO_NOT_DEEPCOPY:
print '' + 0 * """ Warning: April 2010: I am eliminating deepcopying ... it's up to the caller now to do this in advance. Check your update() calls in making the models list for regTable."""
if variableOrder == None and showonlyvars == None:
if self.variableOrder is not None:
variableOrder = self.variableOrder
else:
variableOrder = defaultVariableOrder # This is a rather customised assumption okay for CPBL thesis work. Set the default, at top of this file, to an empty string (or disable this line) to get rid of the effect...
assert not (variableOrder and showonlyvars
) # Only one should be specified
# Could I not just set variableorder to showonly vars here, too? Trying that, Aug 2009:
if showonlyvars:
variableOrder = showonlyvars
assert extraTexFileSuffix or (
not skipStata == True
) or produceOnly # Otherwise this output .tex will overwrite the main one (though it may not be used). If you use "skipStata", provide the suffix to differentiate the LaTeX output.
if extraTexFileSuffix == None:
extraTexFileSuffix = ''
if followupArgs == None:
followupArgs = {}
if isinstance(models, dict):
print " CAUTION!!!!! IF you pass the models as a single dict, rather than a list of dicts, you will not receive back an updated (ie with estimates) version in the same pointer. It is better always to pass a list. OCtober 2009"
models = [models]
if models.__class__ in [str, unicode]:
models = self.str2models(models)
from copy import deepcopy
originalModels = models
# Well, then, given the above, I don't understand why the following works. (Oct 2009)
# April 2010: What is the following!!? I am turning this off if returnModels is true:
if not DO_NOT_DEEPCOPY and not returnModels:
models = deepcopy([mm for mm in models if mm]) # Ignore blanks
#produceOnly='crc'
possibleIncomeVars = 'lnIncome lnIndivIncome lnHHincome lnAdjHHincome da_lnavHHincome ct_lnavHHincome csd_lnavHHincome cma_lnavHHincome pr_lnavHHincome lnRHHincome da_lnRavHHincome ct_lnRavHHincome csd_lnRavHHincome cma_lnRavHHincome pr_lnRavHHincome da_lnavIndivIncome ct_lnavIndivIncome csd_lnavIndivIncome cma_lnavIndivIncome pr_lnavIndivIncome lnRIndivIncome da_lnRavIndivIncome ct_lnRavIndivIncome csd_lnRavIndivIncome cma_lnRavIndivIncome pr_lnRavIndivIncome ct_vm_lnHHincome ct_ag_lnHHincome ct_al_lnHHincome csd_vm_lnHHincome csd_ag_lnHHincome csd_al_lnHHincome cma_vm_lnHHincome cma_ag_lnHHincome cma_al_lnHHincome ' # Agh.. AdjHHincome should really be scaled? no. the thought experiment is raising everyone's income.
outreg2complete = True
possibleIncomeVars = (possibleIncomeVars + possibleIncomeVars.replace(
'_', '-')).split(' ')
#self.append('\n'+[r'\clearpage\section',r'\subsection'][int(skipStata==True)]+'{%s (%s) ~[%s]}'%(tablename.replace('_','~'),extraTexFileSuffix.replace('_','~'),produceOnly))
self.append(r"""
\clearpage \newpage \clearpage
""") # Don't have figures etc mixed between sections of relevant tables.
assert not skipLaTeX or not extraTexFileSuffix
if self.compactPreview and not skipLaTeX:
self.append('\n%' + [r'\section', r'\subsection'
][int(skipStata == True)] + '{%s (%s) ~[%s]}'
% (tablename.replace('_', '~'), extraTexFileSuffix.
replace('_', '~'), produceOnly) + '\n')
elif not skipLaTeX:
self.append('\n' + [
r'\section', r'\subsection'
][int(skipStata == True)] + '{%s (%s) ~[%s]}' % (tablename.replace(
'_', '~'), extraTexFileSuffix.replace('_', '~'), produceOnly))
# Flatten structure of list of models:
# Until Sept 2009, the way to denote mean-groups (groups of models to take a mean over) was by grouping them together in a sub-list. But if instead the "meanGroupName" fields are set in consecutive models, this could be used to construct the groups. I must continue to support the old method, and avoid mixing them?...
# Algorithm at the moment is to flatten the list first (taking note of implied groups), and then look for the sumGroupNames afterwards.
# I've enforced that meanGroupNames specified outside this function must be strings, while automatic markings herein are integers.
#
# Find expanded length of models: some rows can be wrapped up in to-aggregate groups:
# Also, some "models" are actually just strings that mark a request for a separator in the table. No! The latter should not be true anymore. separators are noted within model dicts. [Oct 2009: Why?! I am reinstating this string feature.]
def parseModelSeparators(mods):
iSeparators = [
ii for ii in range(len(mods))
if isinstance(mods[ii], str) and mods[ii] == '|'
]
if 0 in iSeparators:
print '***** Bug: not sure how to deal with a separator at the beginning of a group! Dropping it.'
iSeparators.pop(0)
for ii in iSeparators[::-1]:
if not isinstance(mods[ii - 1], dict):
print '***** Bug: not sure how to deal with separator right after a group!' # Maybe ignore it? Since there will be one anyway..?
raise Exception('separator right after a group of models is not handled')
else:
mods.pop(ii)
assert not 'format' in mods[ii - 1]
mods[ii - 1]['format'] = 'c|'
return ()
fullmodels = []
combineColumns = []
#if transposed==None:
# transposed=True
sumGroup = 0 # Label for sumGroups.
parseModelSeparators(models)
for row in models:
if isinstance(row, list) and isinstance(
row[0],
dict): # If this "model" is really a group of models
# We've found a group. Are they also labelled with group name? If so, they must all be the same:
if any(['meanGroupName' in model for model in row]):
assert all([
'meanGroupName' in model and
model['meanGroupName'].__class__ in [str, unicode]
for model in row
])
# First, go through this group and look for any strings (not done yet : june 2008. maybe drop this feature.)
parseModelSeparators(row)
fullmodels += row # Add them all as separate entries.
if len(
row
) > 1: # If there is more than one model in this group of models
sumGroup += 1 # Label for new group of models to sum
for mm in row:
assert isinstance(mm, dict)
if not 'meanGroupName' in mm:
mm['meanGroupName'] = str(
sumGroup
) # Hm.. I am using meanGroupName for both numbers (auto) and string (named) format.
print(
' Found group "%s" of %d regressions whose coefficients I should aggregate...'
% (mm.get('meanGroupName', str(sumGroup)), len(row)))
else:
assert isinstance(row, dict)
fullmodels += [row]
alreadyProcessedMeans = [
imm for imm, mm in enumerate(fullmodels) if 'isMean' in mm
]
if alreadyProcessedMeans and not skipReadingResults:
##assert 0 # just saw this: should integrete skipreadingfile or whatever it's called, option? skipReadingResults
print ' It looks like you have passed an already-processed set of models. Dropping %d synthetic mean models out of a passed list %d long... Dropping: ' % (
len(alreadyProcessedMeans), len(fullmodels)) + str(
[fullmodels[imm]['name'] for imm in alreadyProcessedMeans])
for iimm in alreadyProcessedMeans[::-1]:
fullmodels.pop(iimm)
# June 2011: should I drop compdiffs here too??
if not models:
print(' regTable found empty models. Aborting...')
return ('')
assert models
assert fullmodels
models = fullmodels
nModels = len(models)
debugprint('--> combineColumns: ', combineColumns)
# Check and warn if models include pointer duplicates:
for im, mm in enumerate(models):
for imm in range(im + 1, len(models)):
# N.B. use of "is" (object identity), not "==" (value identity / equivalence, defined by whatever is .__eq__) in the following:
if models[im] is models[imm]:
print ' ** Warning!!! Your list of models includes duplicate pointers (%dth=%dth). Is that intentional?... If so, you might want to use deepcopy. ' % (
im, imm)
1 / 0
# Now also look to see whether "meanGroupName"s have been specified.
#if sumGroup
for imm in range(len(models)):
models[imm][
'modelNum'] = imm + 1  # An innovation Sept 2008: This ought to simplify some later stuff. Rewrite all the model/calc numbering through this function. Should do the same with the name.
# Check for separator indicators: simple strings mixed in amongst the dicts. Also add format field # (Obsolete...)
for irow in range(len(models))[::-1]:
##if isinstance(models[irow],str) and len(models[irow])>1: # not a separator; convert 1element to 4 element
## models[irow]={'model':models[irow]} # Probably need to fill in other fields here
if 0 and isinstance(models[irow],
str): #'format' not in models[irow]:
""" Maybe drop this feature."""
print('Found format string? ', models[irow])
models[irow - 1]['format'] = 'c|'
models.pop(irow)
nModels += -1
elif not 'format' in models[irow]:
models[irow]['format'] = 'c'
if 0: # I don't think I am using these separators yet. Add that feature later.
# Look for vertical line indicators. These can be lines by themselves. When they are, move them to the 4th element of previous row.
# Also, enforce (override) vertical separators around any combineColumn groups. I guess I'll need to remake the modelTeXformat for each kind of output file I make.
# First, just modify the models array so that the 4th element contains a format code:
for irow in range(len(models))[1:][::-1]:
# Either the 4th element of a row, or the entirety of the following row, can both indicate a separator:
if (isinstance(models[irow], str) and models[irow] == '|'
) or irow in [cc[0] for cc in combineColumns]:
models[irow - 1][3] = 'c|'
if irow in [cc[-1] for cc in combineColumns]:
models[irow][3] = 'c|'
# Expand some other arguments that can be strings or lists: THESE SHOULD BE MOVED INSIDE THE DICTS.. Oct 2009: it already is, via defaultModel, below.
if 0 and isinstance(method, str):
for mm in models:
assert 'method' not in mm
mm.update({'method': method}) # This overwrites.
# Some of the optional arguments to regTable can all be dealt with using defaultModel:
if not defaultModel:
defaultModel = {}
defaultArgs = [['depvar', depvar], ['method', method],
['regoptions', regoptions]]
for defarg, argval in defaultArgs:
assert not (argval and defarg in defaultModel
) #Can't specify value through both forms
if argval:
defaultModel[defarg] = argval
# The following (Sept 2008) makes redundant a whole bunch of other flags, like the "depvar" to follow.
if defaultModel:
self.updateToDefaultModel(models, defaultModel)
# Hm. What about defaults that I just want to fill in as a backstop? ie it's okay if some exist; I just won't overwrite those. ie. without the "assert" above
for amodel in models:
if 'name' not in amodel and not hideModelNames:
amodel['name'] = amodel['depvar']
###plainSubstitutions=substitutions
if substitutions == None:
substitutions = self.substitutions #standardSubstitutions
# One must never use mutable objects (like lists) as default values in a function definition.
# (See http://effbot.org/pyfaq/why-are-default-values-shared-between-objects.htm and
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/502206 )
# So, set them to "None" in the definition, and then redefine them at runtime here:
#if not substitutions:
# substitutions=[]
if not attributes:
attributes = []
if not extralines:
extralines = []
if not hidevars:
hidevars = []
elif isinstance(hidevars, str):
hidevars = [hh for hh in hidevars.split(' ') if hh]
if not forceShowVars:
forceShowVars = []
# Get a non-redundant list of all the variables to be included
# (if it is not overridden by passed value). IT would be nice
# if the order were in order of appearance.
# Oogh. To do this properly now, I need to ignore everything
# after " if " in each model spec, and to look for both forms
# (string, list) of the model specs.
# This line was buggy when no "if" exists; killed it (July 2008): allspecs=[model['model'][0:model['model'].find(' if ')] for model in models]
# def getModelSpecification(am):
# if am['method'].startswith('ivreg'):
#
# 2015 June. Following seems not used
# allspecs=[model['model'].split(' if ')[0] for model in models if 'model' in model] # For 'isMean', there would be no 'model'
# allCovariates= ' '.join( uniqueInOrder(re.split(' ',' '.join(allspecs))) )
tablenamel = self.generateLongTableName(tablename, skipStata=skipStata)
for ttt in models:
ttt['tableName'] = tablenamel
import time
from cpblUtilities import dgetget
# Reset output for this table by erasing the .txt file:
#outs="""
#*capture erase "%s%sor"
#"""%(defaults['paths']['stata']['tex'],tablenamel)
outs = ''
print """regTable(): Initiated "%s"(%s) %s with %d models in %d groups. """ % (
tablenamel, extraTexFileSuffix, skipStata * ' in skipStata mode ',
len(models), len(
[omm for omm in originalModels if omm not in ['|']])),
tableLogName = defaults['paths']['stata']['tex'] + tablenamel + '.log'
tableLogNameWithDate = defaults['paths']['stata'][
'working'] + 'logs/' + tablenamel + time.strftime(
'%Y_%m_%d_%H%M%S_') + '.log'
if self.skipStataForCompletedTables and os.path.exists(tableLogName):
if not skipStata:
print ' Skipping Stata for %s because latex.skipStataForCompletedTables is set ON!!!! and this table is done.' % tablenamel
outs += """
"""
skipStata = True
outs += """
log using %s, text replace
* CPBL BEGIN TABLE:%s: AT %s
""" % (tableLogNameWithDate, tablename,
time.strftime('%Y_%m_%d_%H%M%S'))
"""
Need notes here: what is accomplished? What starts here?
"""
for im in range(nModels): #regressors in models:
if not DO_NOT_DEEPCOPY and not returnModels: # In case of returnModels, following security insurance is not present.
models[im] = deepcopy(
models[im]
) # Ensure against overwriting parts of other models (is there ever a case when I want to??)
model = models[im]
if 'isManualEntry' in model:
continue
if 'code' not in model:
model['code'] = dict(before='', after='')
if not 'before' in model['code']:
model['code']['before'] = ''
if not 'after' in model['code']:
model['code']['after'] = ''
if ' if ' not in model['model']:
#assert '[' not in model['model']
model['model'] += ' if 1'
outs += """
* CPBL BEGIN MODEL:%s_%d:
""" % (tablename, im + 1) + """
* ie Start of estimate %d (for table "%s")
""" % (im + 1, tablename)
assert len(model['model'].split(
' if ')) < 3 # Did you write more than one if in your model?
# def multireplace(ss,adict,onlyIf=True):
# if not onlyIf: return(ss)
# for cfrom,cto in adict.items():
# ss=ss.replace(cfrom,cto)
# return(ss)
# def removeIVREGchars(ss,method=None):
# return( multireplace(ss,{'(':'',')':'','=':' '},onlyIf='ivreg' in method) )
# def extractRHS(model)
# # For case of ivreg, remove =,(,). For case of Stata's factor varialbe notation, ...
if 0:
RHSvarsNoWildcards = ' '.join([
vv
for vv in removeIVREGchars(
model['model'].split(' if ')[0]).split(' ')
if vv and '*' not in vv
])
# At this point, all models should be multi-field'ed dicts.
###if isinstance(model,list): # Use extra features....
if model['name']: # So first element can be empty; then it is ignored
model['stataModelName'] = model[
'name'] # Used to be columnName[im]
#rowModelNames[im]=model['name']
# New method: just store modified flags in model:
model['textralines'] = parseFlags(model.get('flags', None))
modelregoptions = model.get('regoptions', ',robust' *
(method not in ['rreg']))
assert betas in [None, False, True]
doBetaMode = False
if 'beta' in modelregoptions or betas == True:
assert model['method'] in [
'svy:reg', 'svy:regress', 'reg', 'regress', 'rreg', 'areg',
'xtreg'
] # Can I Adding xtreg??
doBetaMode = True
model['textralines'][r'$\beta$ coefs'] = r'\YesMark'
dsetset(model,['flags','beta'], True)
if ('cluster' in modelregoptions and 'beta' in modelregoptions
) or model['method'] in ['rreg', 'xtreg']:
#print 'Replacing all "beta"s in :'+modelregoptions
modelregoptions = modelregoptions.replace('beta', '')
if doBetaMode:
assert 'compDiffBy' not in model # You can't call comp diffs with beta mode..
assert 'getSubSampleSums' not in model
if '[pw=' in modelregoptions or '[pweight=' in modelregoptions:
debugprint(
"""Caution! Switching to analytic weights for beta calculation. This reprodcues Stata's own "beta" coefficients when normalisation is done over the right sample, etc."""
)
modelregoptions = modelregoptions.replace(
'[pw=', '[w=').replace('[pweight=', '[w=')
from cpblUtilities import flattenList
# Following section can be rewritten, since these are mostly not used any more. Write their specific purposes here, where you create them. [aug2012 --> [] ]
# make new lookups under RHS (lists) and RHSs (strings)
RHS = {}
# (Before 2015, we used to remove the constant term from the actual Stata call. Not sure why. Stopped doing that.)
RHS['rawWithIf'] = model['model'].split(' if ')[0] + ' if ' + (
' 1' if 'if' not in model['model'] else
model['model'].split(' if ')[1])
RHS['ifCondition'] = RHS['rawWithIf'].split(' if ')[1]
###RHS['raw']=''.join([cc for cc in (model['depvar'] +' '+ re.sub('".*?"','',regressors)).replace('<',' ').replace('>',' ').replace('=',' ').replace('&',' ').split(' ') if cc and cc[0].isalpha() and not cc in ['if','samp','sample']]).split(' ') # >='A' and cc[0]<='z'
###RHS['rawBeforeIf']=RHS['rawWithIf'].split(' if ')[0].split(' ')
RHS['cleanedBeforeIf'] = [
ss
for ss in re.sub('\(|\)|=', ' ', RHS['rawWithIf'].split(
' if ')[0]).split(' ') if ss
] # Deals with ivregress syntax.. This is now a list of words.
RHS['mentionedBeforeIf'] = [
ss
for ss in re.sub('\(|\)|#|=', ' ', RHS['rawWithIf'].split(
' if ')[0]).split(' ') if ss
] # Deals with ivregress syntax, and interactions notation. Leaves i.var as is. NOT USED?
RHS['inConditions'] = uniqueInOrder(''.join([
cc
for cc in (re.sub('".*?"', '', RHS['rawWithIf'].split(
' if ')[1])).replace('<', ' ').replace('>', ' ').replace(
'=', ' ').replace('&', ' ').split(' ')
if cc and cc[0].isalpha() and not cc in
['if', 'samp', 'sample']
]).split(' '))
# Assume there are no wildcards or etc after the "if"
# What about weight var? where is that?
RHS['inIndicators'] = flattenList([
av[2:].split('#') for av in RHS['cleanedBeforeIf']
if av.startswith('i.')
])
RHS['inInteractions'] = flattenList([
av.split('#') for av in RHS['cleanedBeforeIf']
if not av.startswith('i.')
])
RHS['wildcards'] = [
av for av in RHS['cleanedBeforeIf'] if '*' in av
]
RHS['inConditions'] = []
#RHS['includingWildCardsAndInteractions']=[av[2:] if av.startswith('i.') else av for av in RHS['rawBeforeIf']]
# Only single interactions, ie like xxx#yyy and not xxx##yyy, are recognised so far:
RHS['simplest'] = flattenList(
[
av for av in RHS['cleanedBeforeIf']
if '*' not in av and not av.startswith('i.') and '#' not in
av
],
unique=True)
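# Rough illustration (hypothetical spec): for model['model'] = 'lnincomeh2 age i.region educ* if sample==1',
#   RHS['ifCondition']  -> 'sample==1'
#   RHS['inIndicators'] -> ['region']
#   RHS['wildcards']    -> ['educ*']
#   RHS['simplest']     -> ['lnincomeh2', 'age']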
# Now, compile some lists:
RHSs = {
'used': ' '.join(RHS['simplest'] + RHS['inConditions'] +
RHS['inIndicators'] + RHS['inInteractions'])
}
# Do not auto-create variables in f.e. indicators, since we are not going to display the results and therefore notice that they're missing.
if 0:
# Check that we're not feeding a redundant list to corr (in accounting mode) later down the line:
redundantRegressors = ' ' + deepcopy(regressors) + ' '
for vvv in uniqueInOrder(regressors.split(' ')):
redundantRegressors = redundantRegressors.replace(
' %s ' % vvv, ' ', 1)
#uniqueRegressors= ' '.join(
if redundantRegressors.strip(
): #not uniqueRegressors==regressors:
print 'CAUTION!!! list of regressors contains redundant term!!! Eliminating redundancies: ', redundantRegressors.strip(
)
regressors = ' '.join(uniqueInOrder(regressors.split(' ')))
RHSs['autoCreate'] = ' '.join(RHS['simplest'] + RHS['inConditions']
+ RHS['inInteractions'])
RHSs['zScoreVars'] = ' '.join(
[model['depvar']] + RHS['simplest']) #+RHS['inInteractions'])
RHSs['suppressEstimates'] = ' '.join([
rhsv for rhsv in RHS['cleanedBeforeIf']
if rhsv.startswith('i.') and rhsv not in forceShowVars
])
assert not any(['county' in ff for ff in forceShowVars])
assert not forceShowVars
# Following fails if there are redundant (repeated) RHS variables here...
regressionLine = 'capture noisily ' * (
captureNoObservations == True or not stopForErrors
) + model['method'] + ' ' + model['depvar'] + ' ' + RHS[
'rawWithIf'] + ' ' + modelregoptions
self.variablesUsed += ' ' + model['depvar'] + ' ' + RHSs['used']
assert 'code' in model
""" Nov 2009: Oh dear. I am afraid there's no single sensible ordering of where in the 'before' code to insert various things. I may need more direction from user/caller. For now, I'll let the user's "before" code go first, since it might load files, etc. And I'll append all my things (modes, below) in order... [NOT] [See documentation for regTable. 'loadData' is the very first.
Problems: for instance, when I use withCRdummies or etc, it inserts code. But the autocreatevars needs to happen BEFORE that. What I am going to do is to allow more than just "before" and "after". Use "loadData" to come at the very beginning?
So I'll put the two auto things below at the very beginning of "before" and will likely need to fix some calls to regTable to make use of "loadData"...
"""
if 'autoExcludeVars' in model:
""" e.g. consider if the "if" clause identifies one country. Then this would set all values for just that country to -999. Use carefully, because if you mix subsets later, you might get some -999s mixed with real values... (or this should keep track and undo its work... Currently, it does: it uses a "restore" later to undo its work."""
if model['autoExcludeVars'] in [
True, ''
]: # ie its value is None or '' ...?April2010: no... caution: if ==None, this will not work.. (mod'ed april 2010)
preIf, postIf = model['model'].split(' if ')
model['autoExcludeVars'] = postIf
model['code']['autoExcludeVarsBefore'] = """
* ENSURE THAT THE "IF" CLAUSE DOES NOT TURN ANY GENERALLY NON-MISSING VARIABLES INTO ALL-MISSING:
preserve
foreach var of varlist """ + RHSs['autoCreate'] + """ {
quietly sum `var' if """ + model['autoExcludeVars'] + """
if r(N)==0 {
replace `var'=-999 if """ + model['autoExcludeVars'] + """
di " CAUTION!!!!!!!! Replaced `var' values in MODEL:%s_%d:""" % (
tablename, im + 1) + """, since they were all missing!"
}
}
"""
# RHSvarsNoWildcard '
if autoCreateVars and not model.get('special', '') == 'suestTests':
model['code']['autoCreateVarsBefore'] = """
* ENSURE THAT ALL VARIABLES IN THE EQUATION EXIST BY SETTING MISSING ONES TO ALL ZEROs:
foreach var in """ + RHSs['autoCreate'] + """ {
capture confirm variable `var',exact
if _rc~=0 {
di " CAUTION!!!!!!!! `var' DID NOT EXIST in MODEL:%s_%d:""" % (tablename, im +
1) + """!"
capture gen `var'=0
}
}
"""
# "
if doBetaMode: # This "if" section is continued from above.
if RHS['wildcards']: #'*' in thisRegRHSvars: # Can't have wildcards in this beta mode.. or else could drop them here.
""" Why drop them? Just leave them, ut their coefficients won't be betas. March 2013"""
print(
'regTable: CAUTION! TRYING A SAMPLE SELECTION FOR A BETA REGRESSION, BUT THERE ARE WILDCARDS OR v12 INTERACTIONS OR v12 INDICATORS IN SOME VARIABLES. THESE WILL BE LEFT IN, BUT NOT NORMALIZED! ',
RHS['wildcards']
) #[ww for ww in thisRegRHSvars.split(' ') if '*' in ww])
#LthisRegVarsUsed_NoWildCards= [vv for vv in thisRegVarsUsed.split(' ') if vv and not '*' in vv]
# Make sure to use proper weights in zscore!.
zscoreVars = RHSs[
'zScoreVars'] #' '.join([vv for vv in (model['depvar'] +' '+ regressors +' '+ modelregoptions).split(' if ')[0].split(' ') if vv and not '*' in vv])
zscoreLine = 'zscore ' + ' ' + zscoreVars
#zscoreLine= 'zscore '+' '+ ' '.join(LthisRegVarsUsed_NoWildCards)+' if '+ (regressors +' '+ modelregoptions).split(' if ')[1].replace(' if ',' if z_dSample & ') #model['depvar'] +' '+ regressors
#if ' if ' in zscoreLine:
#zscoreLine=zscoreLine.replace(' if ',' if z_dSample & ')
#else:
# zscoreLine=zscoreLine+' if z_dSample '
zscoreLine += ' if z_dSample & ' + RHS['ifCondition']
#zscoreLine+='\n'+ '\n'.join(["""
#*sum z_%s
#*if r(N)==0 {
# *replace z_%s=1
# *}
#"""%(vv,vv) for vv in thisRegVarsUsed.split(' ') if vv and not '*' in vv])+'\n'
# Oct 2009: I am removing the following for the moment. I have had to put z_dSample back in the zscore line to get the right normalisation. So it is important to call this only with variables that exist....
            # Later: Oct 2009: I am putting it back. The following gets around a bug in zscore which sets variables that have no variation to all-missing. So the following should always come along with a zscore call. So I am now implementing a new way to ensure that only existing variables are called.
zscoreLine += """
foreach var of varlist %s {
quietly sum `var'
if r(N)==0 {
di "Filling in values for `var', which is all missing"
replace `var'=0
}
}
""" % (' '.join([
'z_%s' % vv for vv in zscoreVars.split(' ')
if vv and not '*' in vv
]))
# """ " ` "
if combineRows == None:
combineRows = []
combineRows = [[vv, 'z_' + vv] for vv in zscoreVars.split(' ')
if vv and not '*' in vv] + combineRows
debugprint(
'regTable: USING BETA MODE FOR THIS REGRESSION: dropping all z_* variables'
)
# Heavily modify the regression call:
##########3preIf,postIf=regressors.split(' if ')
regressionLine = 'capture noisily ' * (
captureNoObservations == True or not stopForErrors
) + model['method'] + ' z_' + model['depvar'] + ' ' + ' '.join(
[('*' not in vv and '#' not in vv and
not vv.startswith('i.')) * 'z_' + vv
for vv in RHS['cleanedBeforeIf'] if vv
]) + ' if ' + RHS['ifCondition'] + ' ' + modelregoptions
# Also, set up the zscore normalisation, etc.
model['code']['before']+="""
capture drop z_*
gen z_dSample = """+ ' & '.join(['%s<.'%vv for vv in zscoreVars.split(' ') if vv and '*' not in vv])+ ' & '+RHS['ifCondition']+'\n'+zscoreLine+"""
"""#+model['code']['before']+'\n'
model['code']['after'] += """
capture drop z_*
"""
if 0:
assert 'dCountry*' not in model['code'][
'before'] # Why? March2013: This was not commented. So I don't understand it and I'm removing it for now.
###outs+='\n'+regressionLine+'\n' "
if captureNoObservations == True and not model.get(
'special', '') == 'suestTests':
model['code'][
'existenceConditionBefore'] = '* ALLOW FOR POSSIBILITY THAT THERE ARE NO OBSERVATIONS MEETING THIS CONDITION... REPLACE REGRESSION WITH DUMMY REGRESSION IN THIS CASE, AND KEEP GOING. (captureNoObservations==True)'
model['code']['existenceConditionAfter']="""
if _rc==2000 |_rc==302 |_rc==2001 {
capture gen _dummyOne=1
reg _dummyOne _dummyOne
}
else {
if _rc!=0 {
di _rc
di foodle untrapped error! (following line should stop execution if reached)
error _rc
}
"""+0*(regressionLine.split('capture')[1])+"""
}
""" # No loonger, above, need to repeat regression, since used capture NOISILY
# New feature August 2009: Create sub-sample sums
# I've hard-coded this to use "weight" as a weight.
# But this needs to use "mean", not "sum"!! (done: Jan 2010)
if returnModels and not substitutions == self.substitutions: # Mod'd 2010 Sept; will need to update accounting code to use self.subs...?
model[
'substitutions'] = substitutions # Will be needed for formatting output of accounting analysis
if 'getSubSampleSums' in model: # sept 2010: leave this override in for now?
model[
'substitutions'] = substitutions # Will be needed for formatting output of accounting analysis
# 2010 Jan: Need to use "mean", not "sum", so do both for now, and work on parsing the "mean" output
model['code']['sumsAfter'] = """
""" + '\n'.join([
"""
*BEGIN MEAN LIST MULTI
mean """ + model['depvar'] + ' ' + RHSvarsNoWildcards + 0 *
thisRegVarsUsedNoWildcard +
' [pw=weight] if cssaSample & (' + ifs + """),
*END MEAN LIST MULTI
*BEGIN SUM LIST MULTI
sum """ + model['depvar'] + ' ' + RHSvarsNoWildcards + 0 *
thisRegVarsUsedNoWildcard + ' [w=weight] if cssaSample & ('
+ ifs + """), separator(0) nowrap
*END SUM LIST MULTI
""" for ifs in model['getSubSampleSums']
]) ##+model['code']['after']
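            # The *BEGIN/END ... MULTI* marker lines bracket one weighted "mean" and one weighted
            # "sum" block per requested sub-sample condition (restricted to cssaSample & that
            # condition), presumably so the log parser can later pull out per-subsample
            # descriptive statistics to accompany the regression estimates.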
if 'autoExcludeVars' in model:
model['code']['autoExcludeVarsAfter'] = """
restore
"""
if not model.get('special', '') == 'suestTests':
# An optional argument is this stata code to run before the regression!
outs += '\n' + dgetget(model, 'code', 'loadData', '')
outs += '\n' + dgetget(model, 'code', 'cellDummiesBefore', '')
outs += '\n' + dgetget(model, 'code', 'autoCreateVarsBefore',
'')
outs += '\n' + dgetget(model, 'code', 'autoExcludeVarsBefore',
'')
outs += '\n' + dgetget(model, 'code', 'before', '')
outs += '\n' + dgetget(model, 'code',
'existenceConditionBefore', '') + '\n'
#If stopForErrors=False has been chosen, then do the regression so that if it fails (no observations), it will be replaced by a dummy regression. This is similar to my method for cell controls, but simpler.
outs += regressionLine + '\n'
outs += '\n' + dgetget(
model, 'code', 'existenceConditionAfter', ''
) + '\n' # This provides a dummy regression if some existence condition is not met. For example, it's used in national-level regressions in regressionsDunn2010.py. Note, it can also get created automatically/internally by regTable if captureNoObservations is used.
outs += '\n' + dgetget(model, 'code', 'cellDummiesAfter',
'') + '\n'
if 'subSumPlotParams' in model:
model['subSumPlotParams']['comments'] = model[
'subSumPlotParams'].get(
'comments', '') + r'\ctDraftComment{' + str2latex(
regressionLine) + '} '
# Now also run the after-regression code required, for instance to do some statistical tests:
incomeVars = [
iii for iii in possibleIncomeVars
if iii and iii in RHS['mentionedBeforeIf']
]
if incomeVars: # Careful. It's actually possible, with semiparametric [ie rolling regression.], for an income variable to be dropped as collinear, ie everyone has same income.
outs += '\n capture test 0=%s\n' % ('+'.join(incomeVars))
# And save all coefficients to the dataset, if so requested: (yuck... can't I just save them using estimates store and refer to them that way!!)
if assignSaveCoefficientsPrefix:
assert ' if ' in regressors
pieces = regressors.split(' if ')
#if len(pieces)==1:
# pieces+=[' 1 ']
for var in [
vv for vv in pieces[0].split(' ')
if vv and '*' not in vv
]:
outs += '\nreplace %s%s=_b[%s] if %s\n' % (
assignSaveCoefficientsPrefix, var, 'z_' *
(doBetaMode) + var, pieces[1])
outs += '\nreplace s%s%s=_se[%s] if %s\n' % (
assignSaveCoefficientsPrefix[1:], var, 'z_' *
(doBetaMode) + var, pieces[1]
                    ) # old junk: 'beta' in modelregoptions
""" Now, do immediately-after-regression stuff:
(1) display regression results
(2) display regression variance-covariance results
(3) record e(sample) as cssaSample, for use with making associated stats (means) later.
"""
if not model.get('special', '') == 'suestTests':
dropIndicators = ' '.join([
ss for ss in RHSs['suppressEstimates'].split(' ')
if ss and 'partial(' + ss + ')' not in model['regoptions']
])
outs += """
capture drop cssaSample
gen cssaSample=e(sample)
estimates table , varwidth(49) style(oneline) b se p stats(F r2 r2_a r2_p N N_clust ll r2_o """ + (
'ivreg2' in model['method']
) * 'jp idp widstat' + ') ' + 0 * (
'drop(' + dropIndicators + ')'
if dropIndicators else '') + """
* ereturn list
""" % ()
if 'compDiffBy' in model or 'getSubSampleSums' in model:
outs += stataElicitMatrix('e(V)')
# An optional argument is this stata code to run after the regression. I really have no idea which should come first, the externally-specified "after" or the sumsAfter. Really, I should use more specific elements than general "after" so that I can order them sensibly.
outs += '\n' + model['code']['after'] + '\n'
outs += '\n' + dgetget(model, 'code', 'testsAfter', '') + '\n'
outs += '\n' + dgetget(model, 'code', 'autoExcludeVarsAfter',
'')
if dgetget(model, 'code', 'sumsAfter', ''):
outs += '\n' + model['code']['sumsAfter'] + '\n'
if 0 and 'needCovariance':
outs += """
matrix list e(V),nohalf
* END: matrix list e(V)
"""
if 'getSubSampleSums' in model:
outs += """
* Generate a covariance matrix for regressors and display it. This is going to be used as an approximate for the cov matrix for subsets of the full sample, too. (Jan 2010: What about pweights?. Ah. Jan 2010, now incorporated pweight into following:)
matrix accum R = %s [pweight=weight] if cssaSample, nocons dev
matrix sampleIndepCorr = corr(R)
matrix list sampleIndepCorr,nohalf
* END: matrix list sampleIndepCorr
""" % (model['depvar'] + ' ' + thisRegRHSvars
) #(model['depvar']+' '+RHSvarsNoWildcards)#thisRegRHSvars#thisRegVarsUsed
if doPcorr: # Note: This stupidly now calls the before and after, which is no longer compatible in general (see semiparametric)...
# So in Aug 2008 I've turned off the pcorr function by default. Not sure I ever really used it anyway. Well, actually, I've changed semiparam so it may be compatible again, but I am not using pcorr..
outs += '\nlog using %s.pcorr_%d.txt,append text\n' % (
defaults['paths']['stata']['tex'] + 'pcorr/' + tablenamel,
im)
# An optional argument is this stata code to run before the regression.
if 'code' in model:
outs += '\n' + model['code']['before'] + '\n'
#if len(model)>4:
# outs+='\n'+model[4]+'\n'
outs += 'capture ' * (
not stopForErrors
) + 'pcorr ' + model['depvar'] + ' ' + regressors + ' \n'
# An optional argument is this stata code to run after the regression.
if 'code' in model:
outs += '\n' + model['code']['after'] + '\n'
#if len(model)>=6:
# outs+='\n'+model[5]+'\n'
outs += '\nlog close\n'
# An optional argument is this stata code to run after the regression and the results export.
if 'code' in model and 'afterExport' in model['code']:
outs += '\n' + model['code']['afterExport'] + '\n'
if model.get('special', '') == 'suestTests':
outs += model['code']['after']
outs += """
* CPBL END MODEL:%s_%d:
""" % (tablename, im + 1)
outs += """
* CPBL END TABLE:%s:
log close
* Since we've successfully finished, set the preceding log file as the conclusive one
copy "%s" "%s", replace
""" % (tablename, tableLogNameWithDate, tableLogName)
        # Undo the normalisation of variables, if it was done earlier:
#if 0 and normaliseForBeta:
# outs+="""
# * UNDO NORMALISATION OF ALL VARIABLES FOR THIS TABLE
# use %s,clear
# """%betaFN
# Rename completed output file to a more unique / complete name
# This is useful for making different versions of output. Run Stata once; show different rows/cols in separate incantations of regTable.
# Here is a chance to reformat some of the values in textralines. For instance, when names of surveys are included, replace them with small font shortnames:
for mmm in models:
if 'shortnames' in defaults:
shortname = defaults['shortnames']
if 'survey' in mmm['textralines'] and mmm['textralines'][
'survey'] in shortname:
mmm['textralines']['survey'] = mmm['textralines'][
'survey'].replace(
mmm['textralines']['survey'],
shortname[mmm['textralines']['survey']])
if 'survey' in mmm['textralines']:
mmm['textralines']['survey'] = r'{\smaller\smaller %s}' % mmm[
'textralines']['survey']
# This marks NEARLY the end of all Stata code production. N.B. that before we decide to abort including any Stata code for this table, we must check to make sure that the number of models in the table hasn't changed since Stata was run, since this is one more basic check that we do in the processing below that can result in the advice "rerun stata", below. So, proceed with reading Stata output (unless the log file doesn't even exist.) and then close up the Stata code soon below.
###################################################################################
###################################################################################
###################################################################################
# Read in the txt version of the regression results, and do some initial processing which can be used for
# all three output formats: old (no aggregated results), with averaged columns, and only averaged columns.
tableFileName = tablenamel
if skipReadingResults:
if not all(['eststats' in mm for mm in models]):
not os.path.exists(tableLogName)
print ' skipReadingResults used without eststats in the models! Assuming model not run yet...'
print ' Missing %s... Rerun Stata' % tableLogName
self.append(r'\newpage{ Missing %s... Rerun Stata}\newpage' %
str2latex(tableLogName + extraTexFileSuffix))
return (outs)
pass # Should assert ALL KINDS OF THINGS HERE
assert all(['eststats' in mm for mm in models])
else:
if not os.path.exists(tableLogName):
print ' Missing %s... Rerun Stata' % tableLogName
self.append(r'\newpage{ Missing %s... Rerun Stata}\newpage' %
str2latex(tableLogName + extraTexFileSuffix))
return (outs)
#rlf=readStataRegressionLogFile(tableLogName,output='trows',dropIfStartsWith=['dCountry'],dropVars=None)
#import operator
#allvarsM=uniqueInOrder( reduce(operator.add, [mm['coefs'].keys() for mm in rlf], []))
# # # # # # # # READ MAIN STATA OUTPUT . # # # # # # # # #
modelsOriginal = [deepcopy(mm) for mm in models]
rsrlf = readStataRegressionLogFile(
tableLogName,
dropIfStartsWith=['dCountry'] * 0,
dropVars=None,
models=models)
# Nov 2010: I'm going to include the entire log file in every .tex file I create, so the source result is always with the paper.
# # # # # # # # MAKE FINAL DECISION ON SKIPPING STATA DUE TO AUTO-SKIP-COMPLETED TABLES, ETC # # # # # # # # #
if rsrlf in [None] or 'run Stata and try again' in rsrlf:
skipStata = False # I won't skip stata if the log file is outdated.
print " (OVERRIDE skipStata!: will run Stata for table " + tablename + " because it's outdated.)"
if skipStata == True:
            assert self.skipStataForCompletedTables or not skipLaTeX # Why is this necessary?? Maybe a warning should be issued... Also, May 2010: I've added an "or" for self.skipStataForCompletedTables. The logic is not quite right, but it stops the assert from binding in a common situation when we don't want it to, while still letting it bind sometimes. (ugh)
outs = """
* regTable: Skipping reestimation of entire table %s (logged in %s) because skipStata was set True """ % (
tablename, tablenamel
) + self.skipStataForCompletedTables * ' or because self.skipStataForCompletedTables was set True ' + """
"""
# This marks the end of all Stata code production. The rest is processing and formatting of the results.
if isinstance(rsrlf, str):
print(rsrlf)
self.append(rsrlf)
assert 'First' not in extraTexFileSuffix
return (outs)
if rsrlf == None:
            print 'Got None from readStataRegressionLogFile!'
return (outs)
models = rsrlf['models']
assert models
assert all(['eststats' in mm for mm in models])
# Note the date that this regression was done.
import time, os
regressionDate = time.ctime(os.path.getmtime(tableLogName))
comments += ' Regression carried out on ' + regressionDate #ress['regressionDate']
# Try to add some other useful comments.
allMethods = uniqueInOrder(
[ss for ss in [mm.get('method', '') for mm in models] if ss])
allLHS = uniqueInOrder(
[ss for ss in [mm.get('depvar', '') for mm in models] if ss])
allModels = uniqueInOrder(
[ss for ss in [mm.get('model', '') for mm in models] if ss])
comments += ' Regression methods: %s. Dependent vars: %s ' % (
allMethods, allLHS)
if len(allModels) == 1 and len(allModels[0]) < 300:
comments += ' Common model: %s' % (str2latex(allModels[0]))
###########################################################################################
###
#def displayRegTable(self,tablename,models,method=None,depvar=None,regoptions=None,variableOrder=None,showonlyvars=None,hidevars=None,forceShowVars=None,extralines=None,comments='',substitutions=None,options='',attributes=None,landscape=False,transposed=None,combineRows=None,suppressSE=False, produceOnly=None,extraTexFileSuffix=None,doPcorr=False,stopForErrors=True,crcoefsVars=None,skipStata=False,skipLaTeX=False,hidePSumTest=False,defaultModel=None,hideModelNumbers=False,assignSaveCoefficientsPrefix=None,hideModels=None,showModels=None,hideModelNames=False,renumberModels=True,showFailedRegressions=False,multirowLabels=False,betas=False,followupFcn=None,followupArgs=None,showCompDiff=None,returnModels=False,postPlotFcn=None,postPlotArgs=None,autoCreateVars=True,captureNoObservations=None): # Do a set of regressions; output results to .tex and .txt files
# retired options: useOUTREG2=False,
###
#######################################################################################
"""
        Alright, separating this from regTable() in May 2011. This is going to make things fragile, as a lot has to be set up correctly for this function to work. So if you mess with it in the wrong way, things will break. [Not done: added the "skipReadingResults" option instead, above.]
"""
#trowsOld=ress['trows']
##############################################################################
        ############## DROP UNWANTED OR EMPTY MODELS #################################
#
# If we want to drop/use a subset of the models, do that now.: hideModels / showModels
#
assert not hideModels or not showModels # Can't specify both. Well, actually, the line below gives a sensible way in which they could both be allowed at once: a model would be shown if in showModels (or showModels==None) and not in hideModels.
assert showModels or showModels == None # don't pass [] or ''
assert not returnModels or (showModels is None and hideModels is None)
# Note: hideModels / showModels should be a list of pointers to actual model dicts now, and they should simply be dropped here... No need to deepcopy here, I believe.
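        # Illustrative (hypothetical) calls: showModels may be a list of the model dicts
        # themselves (e.g. showModels=[models[0], models[2]], which also allows reordering)
        # or a list of 1-based indices (e.g. showModels=[1, 3]); hideModels takes the same
        # two forms but drops the matching models instead.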
#if (not hideModels or isinstance(hideModels[0],dict)) and (not showModels or isinstance(showModels[0],dict)):
if (hideModels and isinstance(hideModels[0], dict)) and (
showModels and isinstance(showModels[0], dict)
): # Allow for strange case of both being specified... BUT this does not allow reordering!
models = [
mmm for mmm in models
if (not showModels or mmm in showModels
) and (not hideModels or mmm not in hideModels)
] # I could instead just set a "hidden" flag for these but keep them around until exporting...
assert models
elif hideModels and isinstance(hideModels[0], dict):
models = [
mmm for mmm in models
if (not hideModels or mmm not in hideModels)
] # I could instead just set a "hidden" flag for these but keep them around until exporting...
assert models
elif showModels and isinstance(
showModels[0], dict): # This allows for reordering the models
models = [mmm for mmm in showModels if mmm in models]
assert models
elif hideModels or showModels: # These are indices (1-based..) to the model list
if showModels:
assert isinstance(showModels, list) and isinstance(
showModels[0], int)
if hideModels: #isinstance(hideModels,list) and hideModels and isinstance(hideModels[0],int):
assert isinstance(hideModels, list) and isinstance(
hideModels[0], int)
showModels = [
nn + 1 for nn in range(len(models))
if nn + 1 not in hideModels
]
if showModels: #isinstance(showModels,list) and showModels and isinstance(showModels[0],int):
# Remove from model dicts:
if max(showModels) - 1 >= len(models):
print 'CAUTION!!!!!!!!!! Your list of model indices includes non-existing models!!!!!!!!!!! Ignoring them...', [
iM for iM in showModels if iM - 1 >= len(models)
]
models = [
models[iM - 1] for iM in showModels if iM - 1 < len(models)
]
if not models:
print ' AWWWWWWWWWWWWWWWWWWWW! No models left!! Aborting this table!'
self.append(
' AWWWWWWWWWWWWWWWWWWWW! No models left!! Aborting this table!')
return ()
assert models
# Also, drop any models which failed (N=0? Here I used r^2=0 or 1)
# Use existence of non-blank r^2 pr r2-p as an indicator of whether a real regression was done.
badR2 = [fNaN, 1, 1.0,
'noR2'] # I think this should just be fNaN in newmode.
for mm in models:
            if 'isManualEntry' in mm:
continue
if mm.get('special', '') in ['suestTests']:
continue # These can all be empty in this case...
if 'r2' in mm['eststats']:
r2 = mm['eststats']['r2']
else:
r2 = mm['eststats'].get('r2_p', 'noR2')
if r2 in badR2 and mm['method'] not in [
'glm', 'xtreg', 'xtregress'
] and 'ivreg' not in mm[
'method']: # Huh? What about r2-p? N.b.: I'm not detecting failed ivregress yet (2015)
print ' Suppressing model %d (name="%s") from LaTeX table because the regression failed (OR TYPE UNKNOWN to pystata)' % (
mm['modelNum'], mm['name'])
mm['eststats']['r2'] = fNaN
mm['eststats']['r2_a'] = fNaN
mm['eststats']['N'] = 0
if not showFailedRegressions and 0: # CANNOT DO HIDE BROKEN REGs UNTIL REWRITTEN COMBINEROWS FOR NEWMODE...
woeirweoiuJUSTTESTING_CAN_DELETE_THISLINE
models = [
mm for mm in models if mm['eststats']['r2'] not in badR2
] #('r2' in models['eststats'] and
nModels = len(models)
        # THIS IS VERY DANGEROUS. DO NOT CALL RENUMBERMODELS IF YOU ARE USING CR GROUPS... IE THIS NEEDS TO BE FIXED, SINCE I THINK IT MAY
if renumberModels:
for imm in range(len(models)):
models[imm][
                    'modelNum'] = imm + 1 # An innovation Sept 2008: this ought to simplify some later stuff, no? Rewrite all the model/calc numbering through this function. Should do the same with the name.
assert models
def modelCombineRows(models, combineRows):
""" Change the names of regressors in model estimated *results* within a list of model dicts in ordre to combine (or rename) certain variables.
Note that the estimation coefficients in these models should already be "cleaned up" ie blanks should be eliminated; if a regressor is listed, it should have an estimate.
2013: . adict is no longer a dict, but a pandas frame.
"""
def renameKey(adict, vfrom, vto):
if vfrom in adict:
assert vto not in adict
adict[vto] = adict[vfrom]
del adict[vfrom]
for vng in combineRows:
vto = vng[0]
for mmm in models:
if 'estcoefs' not in mmm:
continue
for vfrom in vng[1:]:
if vfrom in mmm['estcoefs']:
# If both a from and a to variable were estimated, this combineRows is impossible:
renameKey(mmm['estcoefs'], vfrom, vto)
if 0: # Code before june 2010 is below:
assert vto not in mmm['estcoefs']
mmm['estcoefs'][vto] = mmm['estcoefs'][vfrom]
del mmm['estcoefs'][vfrom]
# 2010 June: added piece to also rename variables in the covariance matrix..
if 'estCovar' in mmm:
# 2013 June: modified because matrices now come as pandas DFs
if isinstance(mmm['estCovar'], dict):
for vfrom in vng[1:]:
renameKey(mmm['estCovar'], vfrom, vto)
for aa in mmm['estCovar']:
renameKey(mmm['estCovar'][aa], vfrom, vto)
else: # Assume it's a pandas DF
mmm['estCovar'].rename(
columns=dict(
[[vfrom, vto] for vfrom in vng[1:]]),
inplace=True)
mmm['estCovar'].rename(
dict([[vfrom, vto] for vfrom in vng[1:]]),
inplace=True)
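        # Illustrative (hypothetical) use of modelCombineRows: with
        #     combineRows=[['lnHHincome', 'z_lnHHincome']]
        # any 'z_lnHHincome' entry in a model's estcoefs (and in its estCovar rows/columns)
        # is renamed to 'lnHHincome', so standardized and unstandardized runs share a single
        # display row; the first name in each sub-list is the one that is kept.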
###################################################################################
# Remove all ddd_* variables in case of CR-coefs models
###################################################################################
for model in models:
if 'CRgroup' in model:
hidevars += [
vv for vv in model['estcoefs'] if vv.startswith('ddd_')
]
###################################################################################
###################################################################################
# Deal with rows to combine. For instance, when I have both real and nominal incomes, I might want the coefficients to appear on the same row, and just have the difference signified with an indicator row later.
        # Or, if my standardized beta mode is running, we want to rename the z_ variables to their non-z versions, allowing for the possibility that both exist in the estimates table.
import os
debugprint(' ------------------------')
debugprint(tableFileName, ' combine Rows = ', combineRows)
if combineRows:
# Adjust format of this passed parameter:
#combineRows=[rr for rr in [[cell.strip().replace('_','-') for cell in row] for row in combineRows] if rr[0]]
modelCombineRows(models, combineRows)
if any(['compDiffBy' in mm for mm in models]):
print(
' N.B.: combineRows is defined, so will need to revise some keys like compDiffBy...'
)
#if newmode: # I assume the following line is just a safety for backwards compat with what follows?
#combineRows=None # Removed 2013 so that I can replace compdiffby.
###################################################################################
###################################################################################
# Special modes here, to insert new "models" or new variables. e.g.: do compensating differential calculation:
from pylab import sqrt
# COMPENSATING DIFFERENTIALS
for imodel in range(len(models))[::-1]:
if 'compDiffBy' not in models[imodel] or not models[imodel][
'compDiffBy']:
continue
if 'estCovar' not in models[imodel]:
print(
'There is no record of estimation covariance matrix for this model, so SKIPPING COMPENSATING DIFFERENTIALS. Rerun Stata!'
)
continue
elif len(
set(models[imodel]['estCovar'].columns) - set(['_cons'])
) == 0: #not models[imodel]['estCovar']: # ie, it's there, but empty
print(
'\n\n * * * * estCovar is empty. in model %d: "%s": There is no record of estimation covariance matrix for this model, so SKIPPING COMPENSATING DIFFERENTIALS. Rerun Stata?\n'
% (imodel, models[imodel]['name']))
continue
if showCompDiff in [
None, True
]: # this wasn't specified, yet some model(s) had compDiff setting. So choose default ('aftereach' behaviour for showing comp diffs.
showCompDiff = 'aftereach'
frommodel = models[imodel]
##assert frommodel['model']['compDiffBy'] in frommodel['estmodel']
            ##print 'Model num unsafe in following.....' (still?? or did this refer to before deepcopy?)
#cdModel=deepcopy(frommodel) # This is TOO MUCH... ONLY COPY A FEW, NEEDED ITEMS. explicitly listed... [April 2011: why? does RAM matter??]
cdModel = dict(
deepcopy([
ims for ims in frommodel.items()
if ims[0] in [
'name', 'modelNum', 'model', 'substitutions', 'depvar',
'stataModelName', 'method', 'eststats', 'format',
'showCompDiffVars', 'compDiffHideVars', 'logFilename'
]
]))
cdModel.update({
'baseModelCopy': deepcopy(frommodel)
}) # Added April 2011.
cdModel.update({
'special': 'compDiff',
'compDiff': True,
'name': "comp. diff's" + 0 * frommodel['name']
}) # ,'modelNum':max([mm['modelNum'] for mm in models])})
cdModel['eststats']['r2'] = fNaN
cdModel['eststats']['r2-a'] = fNaN
cdModel['estcoefs'] = {}
cdModel['eststats'] = deepcopy(frommodel['eststats'])
V = frommodel[
'estCovar'] # 2013 Feb: this changed from dict to pandas df
incvar = frommodel['compDiffBy']
if combineRows:
for tofrom in combineRows:
if incvar in tofrom[1:]:
print('Using %s instead of %s as comp diff income' %
(tofrom[0], incvar))
incvar = tofrom[0]
continue
if incvar not in frommodel['model'] or incvar not in frommodel[
'estcoefs'] or incvar not in frommodel['estCovar']:
cwarning(frommodel['tableName'] +
': YIKES! IMPORTANT THINGS ARE MISSING for "' +
frommodel['name'] + '"' +
""". CANNOT DO COMP DIFFS. TRY RERUNNING STATA?
You asked for """ + incvar + """ as the income variable, but:
incvar in frommodel['model'] , incvar in frommodel['estcoefs'] , incvar in frommodel['estCovar']
""" + str((incvar in frommodel['model'], incvar in
frommodel['estcoefs'], incvar in frommodel['estCovar'])))
continue
# But do we want to show the comp diffs? Or just calculate them? All or some of them?
# 'showDiffCompVars' can be True, False, a string, or a list.
cdvars = list(
set(frommodel['estcoefs'].keys()) - (set([incvar] + [
'_cons',
'age',
'agesq100',
'age100sq',
'age100t3',
'age100t4',
'age100',
])))
# 'compDiffVars'= list or string list of variables for which comp diffs should be calculated and/or displayed. Note that regTable has a table-level parameter "showCompDiff" which decides whether/how comp diffs should be shown.
if 'compDiffHideVars' in cdModel:
if isinstance(cdModel['compDiffHideVars'], str):
cdModel['compDiffHideVars'] = cdModel[
'compDiffHideVars'].split(' ')
cdvars = [
cv for cv in cdvars
if cv not in cdModel['compDiffHideVars']
]
if 'showCompDiffVars' in cdModel:
if isinstance(cdModel['showCompDiffVars'], str):
cdModel['showCompDiffVars'] = cdModel[
'showCompDiffVars'].split(' ')
cdvars = [
                    cv for cv in frommodel['estcoefs'].keys()
                    if cv in cdModel['showCompDiffVars'] and not cv == incvar
]
for vv in cdvars: # In following, x is the variable of interest, y is income, s is sigma, b is coefficient
bx, by = frommodel['estcoefs'][vv]['b'], frommodel['estcoefs'][
incvar]['b']
sx, sy = frommodel['estcoefs'][vv]['se'], frommodel[
'estcoefs'][incvar]['se']
sxy = V[vv].ix[incvar]
compdiff = bx / by
#secompdiff=sqrt(frommodel['estcoefs'][vv]['se'] - 2*compdiff*V[vv][incvar] + compdiff**2*frommodel['estcoefs'][incvar]['se'] )/frommodel['estcoefs'][incvar]['b']
secompdiff = abs(compdiff) * sqrt((sx / bx)**2 + (sy / by)**2 -
2 * sxy / bx / by)
cdModel['estcoefs'][vv] = {'b': compdiff, 'se': secompdiff}
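                # Delta-method reading of the calculation above: for the ratio c = bx/by,
                #   Var(c) ~= c^2 * [ (sx/bx)^2 + (sy/by)^2 - 2*sxy/(bx*by) ]
                # with sxy the estimated covariance of the two coefficients from e(V), so
                # secompdiff = |c| * sqrt((sx/bx)^2 + (sy/by)^2 - 2*sxy/(bx*by)).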
            # The following looks like I still need to program the functionality for showCompDiff telling me where to put them.
            # May 2011: okay.. this should really be done at the *showing* stage... but this is a quick kludge to add basic functionality:
assert showCompDiff in [False, 'aftereach', 'only']
if showCompDiff in ['aftereach']:
models.insert(imodel + 1, cdModel)
frommodel[
                    'cdModel'] = cdModel # This should just be a pointer to the same thing: I may not want to actually insert a new model, but rather act on the existence of a cdModel element.
                #fromModel['cdModel']=cdModel # uhhh.. I can't think how to insert models as I go in a loop over models.
if showCompDiff in ['only']:
models[
                    imodel] = cdModel # Destroy the from-model; replace it with the comp-diff model.
##############################################################################
# Make pairedRows, to fit with old code, for the time being.
r2names = [
'e(r2-a)', 'e(r2)', 'e(r2-p)', 'r2', 'r2_a', 'r2-a', 'r2_p', 'r2-p'
]
# Reproduce old behaviour of showing a summed-income-coefficients value by adding a new variable
for model in models:
if model.get('special', '') in ['suestTests']:
continue
#summedIncomecoefs=[sum([icv[iModel] for icv in incomeCoefs if icv[iModel] ]) for iModel in range(len(pairedRows[0][0])-1)]
incsum, seincsum = seSum([
model['estcoefs'][vv]['b'] for vv in model['estcoefs']
if vv in possibleIncomeVars
], [
model['estcoefs'][vv]['se'] for vv in model['estcoefs']
if vv in possibleIncomeVars
])
if incsum and len([
model['estcoefs'][vv]['b'] for vv in model['estcoefs']
if vv in possibleIncomeVars
]) > 1:
model['estcoefs'][
r'$\sum\beta_{\rm{inc}}$NOTUSINGCOVMATRIX'] = {
'b': incsum,
'se': seincsum
}
#assert 0 # Whoa. Should do this sum properly using covariance matrix.... Don't delete copy of my old method, below, yet.
assert not hidePSumTest # This is no longer implemented. And this flag could just add the r(p) etc to hideVars rather than needing special treatment.. ? [sep 2009]
# OR... insert them as a column juust before r(p)
if 0 and 'r(p)' in [ss[0] for ss in sumStatsRows]:
if incomeCoefs:
sumStatsRows.insert(
[
ipair for ipair in range(len(sumStatsRows))
if sumStatsRows[ipair][0] == 'r(p)'
][0], [r'$\sum I$'] + chooseSFormat(
summedIncomecoefs, lowCutoff=1.0e-3))
# And format the r(p) row while I am at it:
#if 'r(p)' in [ss[0] for ss in sumStatsRows]:
rprow = [ss for ss in sumStatsRows if ss[0] == 'r(p)'][0]
rprow[1:] = chooseSFormat(rprow[1:], lowCutoff=1.0e-3)
# If there is any information about a desired order of
if 0:
# Remove hidevars from LaTeX output versions:
if hidevars:
noshowKeys = uniqueInOrder(re.split(' ', hidevars.strip()))
for ipair in range(len(pairedRows))[::-1]:
if pairedRows[ipair][0][0] in noshowKeys:
pairedRows.pop(ipair)
if hidevars: # Careful; this feature hasn't been properly commensurated with showonlyvars:
extralines = [
el for el in extralines if el[0].strip() not in noshowKeys
]
sumStatsRows = [
el for el in sumStatsRows
if el[0].strip() not in noshowKeys
]
if betas: # Constant term is zero for standardized beta coefficients. So if they're all beta, don't show constant.
hidevars += ' _cons'
# kludge sept 2009: [no, not kludge. this is a formatting decision which should be here]
skipCounts = 0
for mm in models:
if 'skipNumber' in mm:
skipCounts += 1
if 'isManualEntry' not in mm or 'texModelNum' not in mm:
mm['texModelNum'] = '(%d)' % (mm['modelNum'] - skipCounts)
if skipCounts:
print ' Equation numbering reflects some "skipNumber"s and does not correspond to actual estimate numbers'
###################################################################################
###################################################################################
###################################################################################
# I can immediately create the full version of the LaTeX file except for combining rows (models):
####tableFileName=tablenamel
tableTexName, bothOutfilename, justaggFilename, justpooledFilename, crcFilename, crcOnlyFilename = tablenamel + extraTexFileSuffix, tablenamel + '-withMeans' + extraTexFileSuffix, tablenamel + '-onlyMeans' + extraTexFileSuffix, tablenamel + '-onlyPooled' + extraTexFileSuffix, tablenamel + '-wCRcoefs' + extraTexFileSuffix, tablenamel + '-CRcoefs' + extraTexFileSuffix
if not produceOnly == None:
produceOnlyLower = produceOnly.lower()
else:
produceOnlyLower = None
# May 2011: add the texfilesuffix to caption so that the mod'd versions show up more nicely in TOC in auto LaTeX doc.
tablecaption = tablename
if extraTexFileSuffix:
tablecaption += extraTexFileSuffix
if produceOnly == None or produceOnlyLower in [None, 'onlyraw']:
if not skipLaTeX:
self.appendRegressionTable(
models,
tableFormat={
'caption': tablecaption,
'comments': comments,
'hideVars': hidevars,
'variableOrder': variableOrder,
'hideModelNames': hideModelNames
},
suppressSE=suppressSE,
substitutions=substitutions,
tableFilePath=defaults['paths']['tex'] + tableTexName +
'.tex',
transposed=transposed,
sourceLogfile=tableLogName
if not skipReadingResults else None
) #landscape=landscape,rowModelNames=rowModelNames,hideVars=hidevars,extralines,modelTeXformat=modelTeXformat, tableCaption=tablename,tableComments=comments
#(not hideModelNames)*[amo['depvar'] for amo in models],['(%s)'%dd for dd in range(1,len(models)+1)],pairedRows,extralines+sumStatsRows, suppressSE=suppressSE,substitutions=substitutions,modelTeXformat=modelTeXformat, landscape=landscape, tableFilePath=defaults['paths']['tex']+tableTexName+'.tex', tableCaption=tablename, tableComments=comments, transposed=transposed,rowModelNames=rowModelNames,hideRows=hidevars)
# ##self,models, extrarows,greycols=None,suppressSE=False,substitutions=None,modelTeXformat=None,transposed=None, tableFilePath=None, tableCaption=None, tableComments=None):
###################################################################################
###################################################################################
###################################################################################
# Now, do any preparations needed for aggregating *models*. ie calculating means of results from similar models applied to different data.
#
# Have I excluded failed-regressions, like I did in the oldmethod?
lSumGroups = uniqueInOrder(
[m['meanGroupName'] for m in models if 'meanGroupName' in m])
sumGroups = dict([[
crg, [mm for mm in models if mm.get('meanGroupName', None) == crg]
] for crg in lSumGroups])
#
#lCRgroups=uniqueInOrder([m['CRgroup'].keys()[0] for m in models if 'CRgroup' in m])
#CRgroups=dict([[crg,[mm for mm in models if mm.get('CRgroup',None)==crg]] for crg in lCRgroups])
#lSumGroups=uniqueInOrder([m['meanGroupName'] for m in models if 'meanGroupName' in m])
#sumGroups=dict([[crg,[mm for mm in models if mm.get('meanGroupName',None)==crg]] for crg in lSumGroups])
# It would be a mistake to call this in means mode if there are no models to combine.
        # So? Huh, what is means mode? And what was the following assert supposed to do?
#assert( sumGroups or not produceOnly or (produceOnly.lower() not in ['withmeans','onlymeans']) )
if 1:
# Following needs translating to newmethod:
# Ensure that each agg group are all members of the same CR group:
#aggCRgroups=[[models[iag-1]['CRgroup'] for iag in aggGroup if 'CRgroup' in models[iag-1]] for aggGroup in aggColumns]
for sg in sumGroups:
sgbyVar, sgbyStat, sgbyTextraline = modelResultsByVar(
sumGroups[sg])
meanModel = {
'estcoefs': {},
'eststats': {},
'texModelNum': r'$\langle$%s-%s$\rangle$' %
(str(sumGroups[sg][0].get('modelNum', '??!') - 0),
str(sumGroups[sg][-1].get('modelNum', '??!') - 0)),
'modelNum': 0,
'name': sumGroups[sg][0].get('meanGroupName',
r'\cpblMeanRowName '),
'tableshading': 'grey',
'textralines': {},
'isMean': True
}
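                # Each "mean model" is a synthetic summary column for its meanGroupName group:
                # below, seMean() (defined elsewhere) combines the addends' coefficients and
                # standard errors variable-by-variable into a single mean and SE, N is summed
                # over the addends, and any flag whose value is identical across all addends is
                # carried over unchanged (the 'survey' flag instead records the number of surveys).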
meanModel['meanGroupName'] = meanModel[
'name'] # why was this needed?
assert meanModel[
'name'] # May 2011.. just reading through some code and not sure whether above is safe.
for vv in sgbyVar:
mu, se = seMean(sgbyVar[vv]['coefs'], sgbyVar[vv]['ses'])
if mu not in [None, fNaN]:
meanModel['estcoefs'][vv] = {
'b': mu,
'se': se,
}
meanModel['eststats']['N'] = sum(
sgbyStat['N']) #'$\\geq$%d'%min(sgbyStat['N'])
#eNcol[insertAt]='$\\geq$%d'%(min([int(eNcol[iii+1]) for iii in igroup ]))
# Now copy over the flags that are in common for all addends in this group:
for vv in sgbyTextraline:
if all([
sgbyTextraline[vv][0] == tt
for tt in sgbyTextraline[vv]
]): #All addends have the same value for this flag:
meanModel['textralines'][vv] = sgbyTextraline[vv][0]
elif vv == 'survey': # Special treatment for "survey" flag
meanModel['textralines'][
'survey'] = r'{\smaller \smaller $\langle$%d$\rangle$}' % (
len(uniqueInOrder(sgbyTextraline['survey']))
) ##nNonZeroRegs)#len([agc for agc in aggColumns[iGroup] if >0]))
###bextralines[irow].insert(insertionColumns[iGroup],r'{\smaller \smaller $\langle$%d$\rangle$}'%nNonZeroRegs)#len([agc for agc in aggColumns[iGroup] if >0]))
# And for all other fields in the model, copy over all other elements of the summed-over models which are in common.
for kk in sumGroups[sg][0].keys():
if kk not in meanModel and all([
sumGroups[sg][0][kk] == tt.get(kk, None)
for tt in sumGroups[sg]
]): #All addends have the same value for this flag:
meanModel[kk] = sumGroups[sg][0][kk]
#print '%s are in common .. copying to mean...'%kk
""" Now do the flags:
for irow in range(len(bextralines)):
" For the current group, insert a value into each of the extralines fields."
thisval=extralines[irow][aggColumns[iGroup][-1]] # Value for this extraline in right-most of addend columns
if all([extralines[irow][ic]==thisval for ic in aggColumns[iGroup]]): # All addends have the same value for this property, so copy it:
bextralines[irow].insert(insertionColumns[iGroup],thisval)
#iExtraLinesMeans+=[irow]
elif 'survey '==bextralines[irow][0]:
bextralines[irow].insert(insertionColumns[iGroup],r'{\smaller \smaller $\langle$%d$\rangle$}'%nNonZeroRegs)#len([agc for agc in aggColumns[iGroup] if >0]))
else:
bextralines[irow].insert(insertionColumns[iGroup],'')
"""
# Insert this new "model" in the right place? Insert also some dividers?
iLast = [
ii for ii in range(len(models))
if models[ii] == sumGroups[sg][-1]
]
assert len(
iLast
) == 1 # Important assertion: make sure models are not repeated object pointers. (or do this more generally, earlier on?)
            # Aug 2011: The following looks bizarre: should it copy the format of the first, not the last? Well... if we're reusing models... no, maybe it's fine; the problem is somewhere else.
meanModel['format'] = deepcopy(models[iLast[0]]['format'])
if meanModel['estcoefs']:
models.insert(iLast[0] + 1, meanModel)
#print "nEED TO GO THROUGH FLAGS HERE AND PRESERVE ONES THAT ARE IN COMMON?? AND DEAL WITH SUM OVER SURVEYS IN A CUSTOM WAY"
###if sumGroups:
### # Create a list of CRgroups; keep track of this as list of models gets changed in various ways
### CRgroups=[m.get('CRgroup',{}) for m in models]
if (not skipLaTeX) and (sumGroups or combineColumns) and (
(produceOnly == None and self.skipAllDerivedTables == False
) or produceOnlyLower in [None, 'withmeans', 'means']
): # or produceOnly.lower() not in ['means','crc','onlyraw']:
self.appendRegressionTable(
models,
tableFormat={
'caption': tablecaption + '~(with~means)',
'comments': comments,
'hideVars': hidevars,
'variableOrder': variableOrder,
'hideModelNames': hideModelNames
},
suppressSE=suppressSE,
substitutions=substitutions,
tableFilePath=defaults['paths']['tex'] + bothOutfilename +
'.tex',
transposed=transposed,
sourceLogfile=tableLogName)
#self.append('%s\\begin{table}\caption{%s}\\include{%s}\n%s\n\\end{table}%s\\clearpage\n'%(lscapeb,tablename.replace(' ','~')+'~(with~means)',bothOutfilename,comments,lscapee))
print('Wrote "both-out" tex file...')
if (sumGroups or combineColumns) and (
(produceOnly == None and self.skipAllDerivedTables == False) or
produceOnlyLower in [None, 'means', 'justmeans', 'onlymeans']
): # or produceOnly.lower() not in ['means','crc','onlyraw']:
# "means-only" version:
# Make a version with only new columns, plus any columns
# which were not part of an aggregate.. For instance, some
# columns may only be possible for a single survey. It
# would be nice here to have an entry in multiple rows of
# tiny font to show the surveys included in means.
if 1:
onlyMeans = deepcopy([
mm for mm in models
if not any(
[any([mm in sumGroups[sg]]) for sg in sumGroups])
])
if 0: #WTF does the following not work?
onlyMeans = [
mm.update({
'tableshading': None
}) for mm in deepcopy(onlyMeans)
]
for mm in onlyMeans:
mm['tableshading'] = None
if any(onlyMeans):
if not skipLaTeX:
self.appendRegressionTable(
onlyMeans,
tableFormat={
'caption': tablecaption + '~(only~means)',
'comments': comments,
'hideVars': hidevars,
'variableOrder': variableOrder,
'hideModelNames': hideModelNames
},
suppressSE=suppressSE,
substitutions=substitutions,
tableFilePath=defaults['paths']['tex'] +
justaggFilename + '.tex',
transposed=transposed,
sourceLogfile=tableLogName)
debugprint(
'Wrote "just-means" tex file... [newmethod]')
else:
print ' --- onlyMeans array had no real entries. Why? '
######################################################################################################
# Now, do one other strange thing. When I've used byCR to isolate coefficients at different spatial scales, I want to extract those isolated coefficients one by one for each series. Use just the only-means version of the table, which collapses all models run on each survey into one line. Single-survey models are also there.
# I think in Aug 2008, CR coeffs functionality is working on summed models but not on unsummed (ie single survey) ones. Why?
# late Aug 2008: One bug: when CT-level controls produce no regression, the line should be ignored when picking off CR coefficients. In particular, this is failing for models with only one survey..
# So, to fix this, I should really be working from nCRgroups, not aggCRgroups. If the former doesn't exist, I'll need to fix that still.
# Make lookups for the groups:
lCRgroups = uniqueInOrder(
[m['CRgroup']['id'] for m in models if 'CRgroup' in m])
#It seems the following isn't working; but is it even useful?
CRgroups = dict([[
crg, [
mm for mm in models
if mm.get('CRgroup', {}).get('id', None) == crg
]
] for crg in lCRgroups])
if 0:
##CRgroupModels=[m for m in models if 'CRgroup' in m.keys()]#[[k for k in m.keys() if 'CRgroup' in k] for m in models]
if CRgroups and aggCRgroups and any(
aggCRgroups
): ##any([any(['CRgroup' in k for k in m.keys()]) for m in models]):
"""
Plan: loop over each group which has CRgroup tag not equal to nothing:
find list of all unique group names
find all models in that group.
get list of CRs
etc
N.B.: I want to keep any models which are not part of a CRgroup but which appear in the npairedRows. It's just that I want to collapse any CRgroup models into one line.
More detail needed here...
How do I avoid copying blanks?
Once I know my variables that I am going to fill in and I know which models are part of the group,
shouldn't I just copy each non-blank value down to its appropriate entry in order that the models are called? ie assume that fixed effects are called in order of decreasing scope from one model to the next?
"""
# Prepare versions of output for a CR-f.e. table:
crpairedRows = deepcopy(npairedRows)
crsumStatsRows = deepcopy(nsumStatsRows)
crextralines = deepcopy(nextralines)
crcolumnName = deepcopy(ncolumnName)
crcmodelTeXformat = ['c' for ccc in crcolumnName]
crCRgroups = deepcopy(nCRgroups)
allGroups = uniqueInOrder(
[gg.keys()[0] for gg in crCRgroups
if gg]) # Must preserve order.
# Initialise list of models (row pairs) to keep:
toDelete = [] #toKeep=range(len(npairedRows[0])-1) #Fine
clustercol = [
ff for ff in crextralines if 'clustering' in ff[0]
]
if clustercol:
clustercol = clustercol[0]
surveycol = [ff for ff in crextralines if 'survey' in ff[0]]
if surveycol:
surveycol = surveycol[0]
eNcol = [
ff for ff in crsumStatsRows if ff[0] in ['e(N)', 'N']
] ##'e(N)' == ff[0]]
if eNcol:
eNcol = eNcol[0]
if CRgroups:
print('About to cycle through these CR groups: %s' %
str(CRgroups.keys()))
from cpblUtilities import flattenList
for CRg in CRgroups:
if len(CRgroups[CRg]) < 2:
continue
# Store location for insertion of new CR coefs model.
iLastModel = [
imm for imm, mmm in enumerate(models)
if mmm is CRgroups[CRg][-1]
][0]
# Also record a copy of the original set of models in the CR group:
originalCRmodels = [mm for mm in CRgroups[CRg]]
# Also note (first and last) non-means, for labelling CR coefs
firstLast = [
mm for mm in originalCRmodels if not mm.get('isMean', False)
]
# Now, May 2010: the CRgroups could include fake-models that are sums over models, and/or real (original) models. I guess it's easy to make the old behaviour work: I'll say that if any of them are means, then just take the means.
if any([mm.get('isMean', False) for mm in CRgroups[CRg]]):
for im, gm in enumerate(CRgroups[CRg]):
if not gm.get('isMean', False):
gm['hideInCRCview'] = True
CRgroups[CRg] = [
mm for mm in CRgroups[CRg] if mm.get('isMean', False)
]
# So now in this CRgroup we have either all means or no means.
gmodels = CRgroups[CRg]
# Assume the CRs are decreasing in scale and that they start with a model with no f.e. dummies.
topCR = gmodels[0]['CRgroup']['takeCoef']
# Find out the list of CRs to deal with:
CRs = [gm['CRgroup']['takeCoef'] for gm in gmodels]
            # assert CRs[-1]=='' # This is rather dictated by the withCRcells (or whatever) function, right? And do we surely always want it? Well... do we? Hm, no: revisit this if you ever hit this assert...
# assert len(CRs)>1 # Why? Not necessary, but silly if not true
# Find out which variables we are going to collect CR coefs from. Look for them in the smallest scale (HH), ie no CRs.
if crcoefsVars == None:
crvars = flattenList([
vv for vv in gmodels[-1]['estcoefs']
if 'ln' in vv and 'ncome' in vv
])
if crvars == []:
print 'Oh dear.. I found no variables for CR. WHAT IS GOING ON? Maybe this regression just failed...'
                    # I don't understand why they would be in the last one, rather than the first.
# Uhhhh. hm.
crvars = flattenList([
vv for vv in gmodels[0]['estcoefs']
if 'ln' in vv and 'ncome' in vv
])
else:
crvars = [
vv for vv in crcoefsVars if vv in gmodels[-1]['estcoefs']
]
print ' %s: CR coefficient collection mode: Looking for CRs %s and variables %s.' % (
CRg, str(CRs), str(crvars))
assert crvars
newCRmodel = deepcopy(gmodels[-1])
newCRmodel.update({
'isCRcoefs': True,
'texModelNum': r'$\langle$%s-%s$\rangle$' %
(str(firstLast[0].get('modelNum', '??!') - 0),
str(firstLast[-1].get('modelNum', '??!') - 0))
}) # ,'modelNum':0})
# Need to fiddle here with some flags??
for kk in ['%s~f.e.' % CR for CR in CRs
if CR]: # Get rid of CR~f.e. entries. ...
#if kk in newCRmodel['textralines']:
newCRmodel['textralines'][kk] = r'\YesMark'
newCRmodel['eststats']['N'] = '$\\geq$%d' % (min(
[int(gg['eststats']['N']) for gg in CRgroups[CRg]]))
newCRmodel['textralines']['clustering'] = r'\YesMark'
newCRmodel['name'] += ': CRcoefs'
anythingForNewModel = False
for im, gm in enumerate(gmodels[:-1]):
for crvar in crvars:
copyCR = gm['CRgroup']['takeCoef']
if copyCR.lower() + '_' + crvar in gm['estcoefs']:
newCRmodel['estcoefs'][copyCR.lower(
) + '_' + crvar] = deepcopy(
gm['estcoefs'][copyCR.lower() + '_' + crvar])
anythingForNewModel = True
# Now insert this new model (maybe still needs some annotation.?) right after the final one in the group?
if anythingForNewModel:
self.addSeparator(newCRmodel)
self.addSeparator(models[iLastModel])
models.insert(iLastModel + 1, newCRmodel)
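            # The inserted "CRcoefs" column gathers, from each member model of this CR group, the
            # scale-prefixed coefficient(s) it contributes (e.g. a hypothetical csd_lnHHincome from
            # the model whose takeCoef scale is CSD), so a single summary column collects the
            # income-type coefficients estimated at the different spatial scales of fixed effects.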
if 0:
if 0:
for groupName in CRgroups: #allGroups:
# Find the PR f.e. extralines row because I am going to use it to note new property. [HUH?.]
#topCRfeLine=[nel for nel in nextralines if topCR+'~f.e.' in nel[0] and any([col.strip(' ~0') for col in nel[1:]])]
#startFrom='PR' # Assume all CR sequences contain the same CRs???
#if not TopCR: # If there was no PR row, try for CMA in stead:
# TopCR=[nel for nel in nextralines if 'CMA~f.e.' in nel[0]]# and any([col.strip(' ~') for col in nel[1:]])]
# startFrom='CMA'
# Now, find variables corresponding to the various CR coefficients of interest, noting that PR may not exist (in May 2008 I stopped using PR as a fixed effect)
incomeColsI = {}
#for ig in igroup[1:]: # Assume first has no CR f.e.
# CR=crCRgroups[ig][groupName]
CRs = ['DA', 'CT', 'CSD', 'CMA', 'PR']
# Now, to choose which columns to look at to pick out coefficients, hunt for ones that look income related. Alternatively, the caller may specify them directly [new feature aug 2008. presumably not yet implemented]:
# But careful:
for CR in CRs:
#incomeColsI[CR]=([ff for ff in range(len(crpairedRows)) if CR.lower()+'-log' in crpairedRows[ff][0][0]]+[[]])[0]#Robust to finding nothing, though if loop is over igroup that would be impossible.
if crcoefsVars == None: # Should this be a string? list of vars?
incomeColsI[CR] = [
ff for ff in range(len(crpairedRows))
if CR.lower() + '-log' in
crpairedRows[ff][0][0]
]
else:
incomeColsI[CR] = [
ff for ff in range(len(crpairedRows))
if any([
CR.lower() + '-' + crcoefvar in
crpairedRows[ff][0][0]
for crcoefvar in crcoefsVars
])
]
# Now, deal with one "CR" scale specially: this is the individual level (non-CR), ie the "HH" household level. So I am still just finding variables, not models. This is the set of all other income-related stuff that does not have any CR-level with a fixed-effect sometimes provided.: Stick those into incomeColsI['HH'].
if crcoefsVars == None: # Find all other income vars (ie, the default behaviour):
incomeColsI['HH'] = [
ff for ff in range(len(crpairedRows))
if 'ln' in crpairedRows[ff][0][0] and 'ncome' in
crpairedRows[ff][0][0] and all(
[ff not in incomeColsI[CR] for CR in CRs])
]
else: # If the variable names have been specified, we want the ones which have no CR_ prefix.
incomeColsI['HH'] = [
ff for ff in range(len(crpairedRows))
if crpairedRows[ff][0][0] in crcoefsVars
] # and all([ff not in incomeColsI[CR] for CR in CRs ])]
# incomeCols is just used for debugging at the moment:
incomeCols = {}
for CR in incomeColsI.keys():
incomeCols[
CR] = [crpairedRows[ff] for ff in incomeColsI[CR]]
#print('CR>: ',[[LL[0] for LL in incomeCols[kk]] for kk in incomeCols])
## print(incomeCols['PR'])
## print(incomeCols['CMA'])
## print(incomeCols['CSD'])
## print(incomeCols['CT'])
## print(incomeCols['DA'])
# Now, insert a new model at the end of the CR group which will receive the extracted coefficients:
insertAt = igroup[-1] + 1 + 1
print(nrowModelNames)
isthisreached
for sp in crpairedRows:
sp[0].insert(insertAt, '')
sp[1].insert(insertAt, '')
for el in crextralines: # Copy all of the extralines over.
el.insert(
insertAt, deepcopy(el[insertAt - 1])
) #Rewrite this in terms of igroup, not insertAt.
for el in crsumStatsRows: # Leave these blank.
el.insert(insertAt, '')
if hideModelNames:
crcolumnName.insert(insertAt - 1, '')
else:
crcolumnName.insert(
insertAt - 1,
'CR~coefs') # -1 because there's no label in [0]?
#crcolumnNumbers.pop(td-1) # This is reset below
nrowModelNames.insert(
insertAt - 1,
'') # huh??? what is this again? row or col
crcmodelTeXformat.insert(insertAt - 1, 'c|')
if '|' not in crcmodelTeXformat[insertAt - 2]:
crcmodelTeXformat[insertAt - 2] = 'c|'
crCRgroups.insert(insertAt - 1,
{}) #deepcopy(crCRgroups[igroup[1]]))
print(nrowModelNames)
CRs = [
'HH', 'DA', 'CT', 'CSD', 'CMA', 'PR', ''
                    ] # first element, HH, gets all the non-CR coefs first.
stillToCopy = deepcopy(CRs)
print('CR>: ', [[kk, [LL[0] for LL in incomeCols[kk]]]
for kk in incomeCols])
# Now, if there are more f.e. than one, for each f.e., copy the next smallest coefficent over.
# ie loop over the f.e. models and copy appropriate coefficients from each model.
# I say coefficientS because for the no-f.e. case, we want all CR levels larger than or equal to the biggest f.e.
# For the no-f.e. model, copy
                    # Uh.. is this from an older algorithm?: # First, start by copying over all income variables from the smallest f.e. model: this should get HH income, etc.
#for ipr in incomeColsI['HH']:
# prow=crpairedRows[ipr]
# prow[0][insertAt]=prow[0][ig+1]
# prow[1][insertAt]=prow[1][ig+1]
# New algorithm: Cycle over all CRs from small to large. For each, copy over smaller CR coefficents remaining in a list, and then *Delete* those from the list. (Why did I do this crazy fancy thing? I could have just redundantly copied them over until each is filled... e.g. start from final model and work backwards, filling in any not already filled.)
# Try rewriting the algorithm below, late August 2008 frustrated and confused as to why i needed something so complex.
for ig in igroup[::
-1]: # Loop backwards over models within our group so we go from small to large: ie algorithm still depends on the models being called in the right order. (?) ????
# For each model, copy over everything of interest which is not yet filled. It seems my list of all the variables of interest is stuck in incomeColsI
iRelevantVariables = []
for ii in incomeColsI:
iRelevantVariables += incomeColsI[ii]
for iRelevantVariable in iRelevantVariables:
# Fill in any variable not already filled:
prow = crpairedRows[iRelevantVariable]
if not prow[0][insertAt]:
prow[0][insertAt] = prow[0][ig + 1]
prow[1][insertAt] = prow[1][ig + 1]
#CR=crCRgroups[ig][groupName] # This finds the CR f.e. of a particular model in our group
# CRs to copy is all those models with f.e. that are smaller than CR and not yet copied:
#iii=[ic for ic in range(len(stillToCopy)) if stillToCopy[ic]==CR]
# The rather simpler version above, late Aug 2008, seems to work for income CRs, at least. Is there any reason not to use it also for the general case???
"""
CRstoCopy:
"""
crKeepRows = []
for skipThisOldMethod in []: #ig in igroup[::-1]: # Loop backwards over models within our group so we go from small to large: ie algorithm still depends on the models being called in the right order. (?) ????
CR = crCRgroups[ig][
groupName] # This finds the CR f.e. of a particular model in our group
# CRs to copy is all those models with f.e. that are smaller than CR and not yet copied:
iii = [
ic for ic in range(len(stillToCopy))
if stillToCopy[ic] == CR
]
if iii:
CRstoCopy = stillToCopy[0:(iii[0])]
print(stillToCopy, CRstoCopy, CR)
for cc in CRstoCopy:
stillToCopy.remove(cc)
for CRtoCopy in CRstoCopy:
# So loop over the variable rows with those income coefficients:
print('<CR>: So I am looking for rows with ' +
CRtoCopy + '\n')
for ipr in incomeColsI[CRtoCopy]:
"""
At this point, CRtoCopy is the CR level being copied,
stillToCopy lists the CRs larger than the current one;
incomeColsI is a dict which tells me which variable number contains each CR coefficient.
So the present loop is copying coefficients for one CR from each model large to small each available
"""
#print('<CR>: Row %d is one\n'%ipr)
crKeepRows += [ipr]
prow = crpairedRows[ipr]
if prow[0][
ig +
1]: # Only copy over the values if they are not blank. So, for missing regressions, the model is counted as included but actually skipped in filling in the CR coefs row.
prow[0][insertAt] = prow[0][ig + 1]
prow[1][insertAt] = prow[1][ig + 1]
elif 'CRd' in tableFileName:
pass # sdofisdf
#if not prow[0][ig+1]:
# prow[0][insertAt]=groupName
print(
'In CRgoup %s, Copied value "%s" from indep var: %s to new indep var with position %d'
% (groupName, prow[0][ig + 1],
prow[0][0], insertAt))
else:
print 'uhhh..iii empty'
# Now, remove all coefficients in other rows that shouldn't be in this CR summary one:
# We want to keep all the income rows:
#keepCRrows=
# And fix up some of the other "otherlines" entries:
if clustercol: # This is not quite right for the with-CRcoefs output format. It's made for the only-CRcoefs...
debugprint('CLUSTERCOL:', clustercol)
clustercol[0] = 'geo~fixed~effects'
clustercol[insertAt] = r'\YesMark'
debugprint('CLUSTERCOL:', clustercol)
#if surveycol:
# surveycol[insertAt]=deepcopy(surveycol[igroup[-1]+1])
if eNcol:
eNcol[insertAt] = '$>$%d' % (min([
int(eNcol[iii + 1]) for iii in igroup
if int(eNcol[iii + 1]) > 0
]))
# No... I want to mark when the CT one failed; a ">0" is as good as anything.
eNcol[insertAt] = '$\\geq$%d' % (min(
[int(eNcol[iii + 1]) for iii in igroup]))
###crcmodelTeXformat[iTopCR-1]='c|'
# And get rid of all f.e. columns and "clustering" columns
# (not done...)
for el in range(len(crextralines))[::-1]:
if 'f.e.' in crextralines[el][0]:
crextralines.pop(el)
# Still need to fix some column titles and column numbers.
#crcolumnNumbers=['(%d)'%dd for dd in range(1,len(crpairedRows[0][0]))]
madeOutputCRC = False
if (CRgroups) and not skipLaTeX and [
mm for mm in models if mm.get('isCRcoefs', False)
] and ((produceOnly == None and self.skipAllDerivedTables == False) or
produceOnlyLower in [None, 'withcrc', 'crc']):
self.appendRegressionTable(
[mm for mm in models if not mm.get('hideInCRCview', False)],
tableFormat={
'caption': tablecaption + '~(with~crc)',
'comments': comments,
'hideVars': hidevars,
'variableOrder': variableOrder
},
suppressSE=suppressSE,
substitutions=substitutions,
tableFilePath=defaults['paths']['tex'] + crcFilename + '.tex',
transposed=transposed,
sourceLogfile=tableLogName)
print('Wrote "with CRC coefs" tex file...')
madeOutputCRC = True
if (CRgroups) and (
(produceOnly == None and self.skipAllDerivedTables == False
) or produceOnlyLower in [None, 'withcrc', 'onlycrc', 'crc']
) and [mm for mm in models
if mm.get('isCRcoefs', False)] and not skipLaTeX:
self.appendRegressionTable(
[mm for mm in models if mm.get('isCRcoefs', False)],
tableFormat={
'caption': tablecaption + '~(only~crc)',
'comments': comments,
'hideVars': hidevars,
'variableOrder': variableOrder
},
suppressSE=suppressSE,
substitutions=substitutions,
tableFilePath=defaults['paths']['tex'] + crcOnlyFilename +
'.tex',
transposed=transposed,
sourceLogfile=tableLogName)
print('Wrote "only CRC coefs" tex file...')
madeOutputCRC = True
if not madeOutputCRC and defaults['mode'] in [
'RDC'
]: # or 'redux' not in tablenamel or 'ontrol' not in extraTexFileSuffix
print ' No CRC-containing tables were made for %s (%s) because of produceOnly, no CRcoefs, or some other reason ' % (
tablenamel, extraTexFileSuffix)
# if 0:# and produceOnly==None or produceOnly.lower() in ['withcrc']:
# if not skipLaTeX:
# self.old_forPairedRows_appendRegressionTable((not hideModelNames)*crcolumnName,crcolumnNumbers,crpairedRows,crextralines+crsumStatsRows,substitutions=substitutions,suppressSE=False,#suppressSE,
# landscape=landscape, tableFilePath=defaults['paths']['tex']+crcFilename+'.tex', tableCaption=tablename+'~(with~CRC~coefs)', tableComments=comments,modelTeXformat=crcmodelTeXformat,
# transposed=transposed,rowModelNames=nrowModelNames,hideRows=hidevars)
# debugprint('Wrote "with CRC coefs" tex file...')
# #len(crcolumnName),len(crcolumnNumbers),len(crpairedRows),len(crextralines),len(crsumStatsRows)
# if 0:
# if 0:
# for groupName in allGroups:
# igroup=[imodel for imodel in range(len(crCRgroups)) if crCRgroups[imodel] and crCRgroups[imodel].keys()[0]==groupName]
# print('Found %d models in CR group %s\n'%(len(igroup),groupName))
# # This is for a different form of output in which only CRCs are there... I want the option of both.
# # Now, get rid of all those models which have just been used to make CR summaries:
# ##toDelete=[xx+1 for xx in sorted(list(set(igroup)-set([min(igroup),max(igroup)])) )] # ie keep the first (no f.e.) and last (extracted CR coefs)
# # ie keep the first (no f.e.) and last (extracted CR coefs)
# toDelete=[xx+1 for xx in igroup[1:]]#sorted(list(set(igroup)-set([min(igroup),max(igroup)])) )]
# print(' Deleting rows ', toDelete, ' for ',groupName, ' which is at ',igroup)
# for td in toDelete[::-1]: # This hardcodes the fact that the first entry is no f.e., and second entry receives CR summaries.
# for sp in crpairedRows:
# sp[0].pop(td)
# sp[1].pop(td)
# for el in crextralines:
# el.pop(td)
# for el in crsumStatsRows:
# el.pop(td)
# crcolumnName.pop(td-1) # -1 because there's no label in [0]?
# #crcolumnNumbers.pop(td-1) # This is reset below
# nrowModelNames.pop(td-1)
# crcmodelTeXformat.pop(td-1)
# crCRgroups.pop(td-1) # So that next igroup will be calculated correctly.
# # Still need to fix some column titles and column numbers.
# crcolumnNumbers=['(%d)'%dd for dd in range(1,len(crpairedRows[0][0]))]
# if produceOnly==None or produceOnly.lower() in ['means','crc']:
# 1/0
# if not skipLaTeX:
# self.old_forPairedRows_appendRegressionTable((not hideModelNames)*crcolumnName,crcolumnNumbers,crpairedRows,crextralines+crsumStatsRows,substitutions=substitutions,suppressSE=False,#suppressSE,
# landscape=landscape, tableFilePath=defaults['paths']['tex']+crcOnlyFilename+'.tex', tableCaption=tablename+'~(CRC~coefs)', tableComments=comments,modelTeXformat=crcmodelTeXformat,
# transposed=transposed,rowModelNames=nrowModelNames,hideRows=hidevars)
# debugprint('Wrote "CRC coefs" tex file...')
# print 'continuing after missing CR section...'
# # Finally, make a version which excludes all addends AND all means. ie just conventional, pooled columns.
# if 1:
# print 'not sure that all features are yet implemented in new method; see older code above'
# onlyPooled=[mm for mm in models if not any([any([mm in sumGroups[sg]]) for sg in sumGroups]) and not 'meanGroupName' in mm]
# if onlyPooled and not skipLaTeX:
# self.appendRegressionTable(onlyPooled,tableFormat={'caption':tablecaption+'~(only~pooled)','comments':comments,'hideVars':hidevars,'variableOrder':variableOrder},
# suppressSE=suppressSE,substitutions=substitutions, tableFilePath=defaults['paths']['tex']+justaggFilename+'.tex', transposed=transposed)
#
# NOW DEAL WITH FOLLOW-UP FUNCTIONS: PASS ALL OUTPUT DATA TO ANY DESIRED PLOTTING,ETC FUNCTION(S)
#
# Automatically invoke subSampleAccounting whenever getSubSampleSums is invoked in any model:
if any(['getSubSampleSums' in mm for mm in models]):
if not followupFcn:
followupFcn = [subSampleAccounting]
elif not followupFcn == subSampleAccounting and not subSampleAccounting in followupFcn:
followupFcn += [subSampleAccounting]
if followupFcn: # Maybe this call should come earlier, if models gets too fiddled with.
standardArgs = {
'tableFileName': tableFileName,
'tablename': tablename,
'substitutions': substitutions
}
for ff in standardArgs:
followupArgs[ff] = followupArgs.get(ff, standardArgs[ff])
if isinstance(followupFcn, list):
for fcn in followupFcn:
fcn(self, models, followupArgs)
else:
followupFcn(self, models, followupArgs)
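# A hedged aside on the followup-function interface (names below are hypothetical): given how they
# are invoked just above, a followup function is assumed to take (latexInstance, models, followupArgs),
# e.g.
#   def myFollowup(latexRF, models, followupArgs):
#       print 'Follow-up ran on %d models for table %s' % (len(models), followupArgs['tablename'])
# and is passed in through the followupFcn argument, either alone or in a list.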
if returnModels:
# Make an effort now to send the updated models out to the calling function, i.e. by updating the caller's list/dict in place.
# April 2010: I think this fails if a list of lists of dicts is sent.
lom = len(originalModels)
# I've tried to find a way to do this which gets the model pointers into the original pointer list. I think the following strange combination works: I have to start off replacing existing elements, and then I can extend that list without losing the original address.
for imm in range(len(models)):
if imm < lom:
originalModels[imm] = models[imm]
else:
originalModels += [models[imm]]
#originalModels=originalModels[lom-1:]
# That (above) seems successful..
return (outs) # Returns stata code
def includeFig(self, figname=None, caption=None, texwidth=None,
title=None): #,,onlyPNG=False,rcparams=None
if texwidth == None:
texwidth = r'[width=0.5\columnwidth]'
elif not texwidth == '':
texwidth = '[width=%s]' % texwidth
figlocation, figfile = os.path.split(figname)
if not figlocation:
figlocation = r'\texdocs '
else:
figlocation += '/'
if not os.path.exists(figname+'.pdf'):
print(' Caution: Cannot find included figure file for LaTeX.')
if caption == None:
caption = ' (no caption) '
if title == None:
title = figfile
# % Arguments: 1=filepath, 2=height or width declaration, 3=caption for TOC, 4=caption title, 5=caption details, 6=figlabel
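# For orientation (a hedged sketch using a hypothetical figure name 'myfig' and the defaults above),
# the \cpblFigureTC line assembled below comes out roughly as:
#   \cpblFigureTC{\texdocs myfig.pdf}{width=0.5\columnwidth}{ myfig}{myfig}{ (no caption) }{myfig}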
figCode = r"""
\clearpage\newpage\clearpage
%\begin{landscape}
\cpblFigureTC{""" + figlocation + figfile + '.pdf}{' + texwidth[
1:
-1] + r'}{ ' + title + '}{' + title + '}{' + caption + '' + '}{' + figfile + """}
%\end{landscape}
\clearpage
"""
# r'\ctDraftComment{'+figlocation+figfile+'}'+
#/home/cpbl/rdcLocal/graphicsOut/corrsBynCR-A50-macroTS-GSS17-GSS22-belongCommunity}}{\linkback{corrsBynCR-A50-macroTS-GSS17-GSS22-belongCommunity}}{corrsBynCR-A50-macroTS-GSS17-GSS22-belongCommunity}
# Old version, retired:
r"""
\begin{figure}
\centering \includegraphics""" + texwidth + '{' + figlocation + figfile + '.pdf' + r"""}
\caption[""" + figname + title + r'AAA]{ \ctDraftComment{' + figlocation + figfile + '}' + title + '' + caption + 0 * (
r' \ctDraftComment{' + figlocation + figfile + '}') + r"""
\label{fig:""" + figfile + r"""} }
\end{figure}
%\end{landscape}
\clearpage
"""
figtout = open(
defaults['paths']['tex'] + 'tmpFig-' + figfile + '.tex',
'wt',
encoding='utf-8')
figtout.write(figCode)
figtout.close()
self.append(r"""%% \input{\texdocs tmpFig-""" + figfile + """}
""" + figCode)
print ' Included %s in latex file...' % figname
return ()
def saveAndIncludeStataFig(self, figname, caption=None, texwidth=None):
"""
You probably want to include the following in your stata plot command:
graphregion(color(white)) bgcolor(white)
"""
pp, ff, ee = [os.path.split(figname)[0]] + list(
os.path.splitext(os.path.split(figname)[1]))
if pp in ['']: pp = paths['graphics']
stout = graphexport(pp + '/' + ff + '.pdf')
self.includeFig(pp + '/' + ff, caption=caption, texwidth=texwidth)
return (stout)
def saveAndIncludeFig(
self,
figname=None,
caption=None,
texwidth=None,
title=None, # It seems title is not used.
onlyPNG=False,
rcparams=None,
transparent=False,
eps=False, # EPS is useless. No transparency or gradients. Make it from png?
# And include options from savefigall() [April 2011]
ifany=None,
fig=None,
skipIfExists=False,
pauseForMissing=True,
bw=False,
FitCanvasToDrawing=False,
rv=False,
dpi=None):
""" Save a figure (already drawn) and include it in the latex file.
Dec 2009: Also, create a .tex file that can be included in other drafts. ie this means caption etc can be updated automatically too.
July 2010: hm, where to put this stuff, though? Put it all in texdocs?? Agh, it's become a mess. At the moment this works if you just don't give any path as part of figname. (duh, it's a name, not a path).. Oh. mess because I make two kinds of output. A direct include and an \input .tex file! So look again, and decide on the paths...
July 2010: This will now be a repository for making sure I have nice TeX fonts in here, too??? So I need to worry about the physical width of the figure and font sizes, then too.... as options... not done!! But see my figureFontSetup in utilities!! So not yet implemented: rcparams.
Sept 2010: Moved some code out of here into includeFig(), which includes an existing graphics file
Sept 2010: Moved the default location to paths['graphics'] from paths['tex']. includeFig still makes an includable .tex file that wraps the fig, and this .tex file is still in paths['tex']
Sept 2010: looks at self.skipStataForCompletedTables to determine whether to save the fig... This could be made more choicy with a parameter, forceUpdate...
June 2012: Added / Passing rv option through to savefigall
"""
import pylab
#####alreadySaved= os.path.split(figname)[0] and (os.path.exists(figname) or os.path.exists(figname+'.pdf')) and title==None and onlyPNG==False and rcparams==None# Oh: the figure is already done..., is an absolute path. Just include it.
classSaysSkipUpdate = self.skipStataForCompletedTables
if self.skipSavingExistingFigures is not None:
classSaysSkipUpdate = self.skipSavingExistingFigures
from matplotlib import rc
if 0: # What?. This should be at the output stage. June 2012
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
# # for Palatino and other serif fonts use:
# rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
if 0:
params = {#'backend': 'ps',
'axes.labelsize': 20,
'text.fontsize': 14,
'legend.fontsize': 10,
'xtick.labelsize': 16,
'ytick.labelsize': 16,
'text.usetex': False,}
#'figure.figsize': fig_size}
if rcparams:
plt.rcParams.update(rcparams)
if figname == None:
figname = self.fname + '_' + str(pylab.gcf())
# Careful. Adding this 2012 Feb:
from cpblUtilities import str2pathname
# oh dear... i commented out the following april 2010. will this break anything?
#else:
# title='[%s]'%title
figlocation, figfile = os.path.split(figname)
if not figlocation:
figlocation = paths['graphics']
else:
figlocation += '/'
# But if this is going to be included in LaTeX, it cannot contain an underscore.
figfile = str2pathname(figfile).replace(
'_', '-') # really? I have other utilities for this...
#figpath=figname
if onlyPNG:
if classSaysSkipUpdate and os.path.exists(
figlocation + figfile + '.png') and os.path.exists(
figlocation + figfile + '.pdf'):
print ' Skipping saving figure ' + figlocation + figfile + ' because it already exists and self.skipStataForCompletedTables (or skipSavingExistingFigures) is set'
else:
pylab.savefig(
figlocation + figfile + '.png'
) # Missing the bw option to make both colour and bw versions.
assert not "oops: ifany=None,fig=None,skipIfExists=False,pauseForMissing=True): not implemented yet"
print " Wrote a figure: " + figname + '.[png]'
else:
from cpblUtilities import savefigall
if (skipIfExists or classSaysSkipUpdate) and os.path.exists(
figlocation + figfile + '.png') and os.path.exists(
figlocation + figfile + '.pdf'):
print ' Skipping saving figure ' + figlocation + figfile + ' because it already exists and self.skipStataForCompletedTables (or skipSavingExistingFigures) is set'
rootAndTail = True # Huh? this is a kluge based solely on the "if rootAndTail" below. I don't know what root and tail are right now.
else:
rootAndTail = savefigall(
figlocation + figfile,
transparent=transparent,
ifany=ifany,
fig=fig,
pauseForMissing=pauseForMissing,
bw=bw,
FitCanvasToDrawing=FitCanvasToDrawing,
eps=eps,
rv=rv,
dpi=dpi)
print " Wrote a figure: " + figname + '.[pdf/png]'
if rootAndTail:
self.includeFig(
figlocation + figfile, caption=caption, texwidth=texwidth)
return ()
def compareMeansInTwoGroups(self,
showVars,
ifgroups,
ifnames,
tableName=None,
caption=None,
usetest=None,
skipStata=False,
weight=' [pw=weight] ',
substitutions=None,
datafile=None):
"""
May 2011. I have written almost all of this into addDescriptiveStatistics() as the mode=compareMeans mode, but I'm moving it to its own function to complete it.
So write this to work as part of latex output, only.
Plan: generate code, which creates its own logfile. If logfile exists, process it into a table.
Can use a t-test or a ranksum (Two-sample Wilcoxon rank-sum (Mann-Whitney) test) to compare means.
"""
import time
statacode = ''
if datafile is not None:
statacode += stataLoad(datafile)
if substitutions is None:
substitutions = self.substitutions
if isinstance(showVars, basestring):
showVars = [vv for vv in showVars.split(' ') if vv]
tablenamel = self.generateLongTableName(tableName) #,skipStata=False)
print """regTable(): Initiated "%s" with %d variables. """ % (
tablenamel, len(showVars))
tableLogName = paths['tex'] + tablenamel + '.log'
tableLogNameWithDate = defaults['paths']['stata'][
'working'] + 'logs/' + tablenamel + time.strftime(
'%Y_%m_%d_%H%M%S_') + '.log'
if self.skipStataForCompletedTables and os.path.exists(tableLogName):
if not skipStata:
print ' Skipping Stata for %s because latex.skipStataForCompletedTables is set ON!!!! and this table is done.' % tablenamel
statacode += """
"""
skipStata = True
assert len(ifgroups) == 2
assert len(ifnames) == 2
statacode += """
log using %s, text replace
* CPBL BEGIN TABLE:%s: AT %s
""" % (tableLogNameWithDate, tableName,
time.strftime('%Y_%m_%d_%H%M%S'))
statacode += """
capture noisily drop _cmtg
gen _cmtg=0 if %s
replace _cmtg=1 if %s
""" % (ifgroups[0], ifgroups[1])
print 'Still need to add weights and svy behaviour.'
statacode += '\n'.join([("""
capture confirm variable """ + vv + """,exact
if _rc==0 {
capture confirm numeric variable """ + vv + """,exact
if _rc==0 {
di "-~-~-~-~-~-~-~-~"
mean """ + vv + ' if ' + ifgroups[0] + """
test """ + vv + """
mean """ + vv + ' if ' + ifgroups[1] + """
test """ + vv + """
*x~x~x~x~x~x~x~x~x~
ranksum """ + vv + """, by(_cmtg)
*--~--~--~--~--~
}
}
""").replace('\n ', '\n') for vv in showVars if vv])
statacode += """
* CPBL END TABLE:%s: AT %s
* Succeeded / got to end.
* Closing %s
capture noisily log close
""" % (tableName, time.strftime('%Y_%m_%d_%H%M%S'),
tableLogNameWithDate) + """
* Since we've successfully finished, set the preceeding log file as the conclusive one
copy "%s" "%s", replace
""" % (tableLogNameWithDate, tableLogName)
if skipStata:
statacode = ''
oiuoiu # FIXME: deliberately(?) undefined name; raises NameError whenever skipStata is True -- this branch looks unfinished
if not os.path.exists(tableLogName):
print " **** SKIPPING THE COMPARE MEANS BY GROUP IN %s BECAUSE YOU HAVEN'T RUN Stata yet to make it." % tableLogName
return (statacode)
stataLogFile_joinLines(tableLogName)
fa = re.findall("""\* CPBL BEGIN TABLE:%s:(.*?)\* CPBL END TABLE:%s:"""
% (tableName, tableName), ''.join(
open(tableLogName, 'rt').readlines()), re.DOTALL)
assert len(fa) == 1
meanRE = '\s*Mean estimation\s*Number of obs\s*=\s*(\w*)\n(.*?)\n'
meanRE = '\s*Mean estimation\s*Number of obs\s*=\s*(\w*)\n[^\|]*\|[^\|]*\|\s*([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\n[\s-]*'
meanRE = r'\s*Mean estimation\s*Number of obs\s*=\s*([^\s]*)\n[^\|]*\|[^\|]*\|\s*([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\n[\s-]*'
testRE = '.*? Prob > ([Fchi2]*) = +([^\s]*)\s*'
WilcoxonTestRE = '.*? Prob > \|z\| = +([^\s]*)\s*'
# Following long RE is meant to get the long list of variables in the line below (with tonumeric...) for each variable/condition
vps = re.findall(
'\n-~-~-~-~-~-~-~-~\n\s*mean (\w*) if ([^\n]*)\n' + meanRE +
'\s*test ([ \w]*)\n' + testRE + '\s*mean' + ' (\w*) if ([^\n]*)\n'
+ meanRE + '\s*test ([ \w]*)\n' + testRE +
'\s*.x~x~x~x~x~x~x~x~x~\n\s*ranksum ([^\n]*)\n' + WilcoxonTestRE +
'\s*\*--~--~--~--~--~', fa[0], re.DOTALL)
if not len(vps) == len(showVars) and len(vps) > 0:
print " RegExp fails to match to Number of variables, but there are some. Probably some requested variables didn't exist? INVESTIGATE, and only request extant variables."
showVars = [vv for vv in showVars if vv in [vvv[0] for vvv in vps]]
print(' Continuing, using only %s' % str(showVars))
if not len(vps) == len(showVars):
print " RegExp fails to match to Number of variables, though maybe some didn't exist? Or the format's changed / r.e. broken. Instead, assuming code has changed. Aborting. Rerun Stata..."
return (statacode)
varDicts = []
body = []
headers = ['Variable', ifnames[0], ifnames[1], r'$p$(equal)']
for iv, oneVar in enumerate(vps):
v1, if1, N1, mu1, se1, low1, high1, v1t, fchi1, pmu1, v2, if2, N2, mu2, se2, low2, high2, v2t, fchi2, pmu2, Wts, pW = tonumeric(
[kk for kk in oneVar])
if not (showVars[iv] == v1 and showVars[iv] == v2 and
showVars[iv] == v1t and showVars[iv] == v2t):
print(
' Problem with variable alignment. RERUN Stata. *********** '
)
return (statacode)
xs1, ses1 = latexFormatEstimateWithPvalue(
[mu1, se1],
pval=pmu1,
allowZeroSE=None,
tstat=False,
gray=False,
convertStrings=True,
threeSigDigs=None)
xs2, ses2 = latexFormatEstimateWithPvalue(
[mu2, se2],
pval=pmu2,
allowZeroSE=None,
tstat=False,
gray=False,
convertStrings=True,
threeSigDigs=None)
pWs = latexFormatEstimateWithPvalue(
pW,
pval=pW,
allowZeroSE=None,
tstat=False,
gray=False,
convertStrings=True,
threeSigDigs=None)
body += [
[substitutedNames(v1, substitutions), xs1, xs2, pWs],
['', ses1, ses2, ''],
]
cpblTableStyC(
cpblTableElements(
body='\\\\ \n'.join(['&'.join(LL) for LL in body]) +
'\\\\ \n\\cline{1-\\ctNtabCols}\n ',
cformat=None,
firstPageHeader='\\hline ' + ' & '.join(headers) +
'\\\\ \n\\hline\n',
otherPageHeader=None,
tableTitle='Comparing means for %s and %s (%s)' %
(ifnames[0], ifnames[1], tableName),
caption=None,
label=None,
ncols=None,
nrows=None,
footer=None,
tableName=tableName,
landscape=None),
filepath=paths['tex'] + tablenamel + '.tex',
masterLatexFile=self)
#logfname+'-compareMeans-'+str2pathname('-'.join(ifNames))
"""
\n-~-~-~-~-~-~-~-~[^~]*\|[^~]*\|\s*([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\n' +\
r'[^~]*Prob >([\sFchi2]*)=\s*([^\s]+)\s+\}(.*?)x~x~x~x~x~x~x~x~x~'#'#(\s+.*?)~'#
"""
return (statacode)
def addDescriptiveStatistics(self,
tablename=None,
dataFile=None,
codebook=None,
showVars=None,
weightVar=None,
ifcondition=None,
forceUpdate=False,
mainSurvey=None,
code=None,
ifNames=None,
caption='',
mode=None,
substitutions=None):
# callStata=False,
""" Starting over.
This is now a member of latexRegressionFile, rather than the codebook class.
There are two modes. See the old getDescriptiveStatistics in the codebook class for comments (to copy or move here).
If DTA is specified, this will CALL STATA and make the log file (if needed). It adds a table to the self. [Oh.? This seems wrong if there is ifcondition]
IF DTA is not specified, this will produce STATA CODE to make the means log. It returns the stata code and adds a table to the self.
If codebook is specified, at the moment it still requires one of the above, but codebook can allow descriptions etc to be available.
June 2010: I added the feature of being able to give a list of "if conditions", but it still turns these into separate tables. Need to combine the tables.
June 2010: Added "ifNames" option: give names to the ifconditions.
Nov 2010: Does not work as advertised. returning Stata code fails due to assert datafile. I got around it only by specifying a manual codebook. Maybe that's a feature.
-->> *** Also fails to give a warning about notFound if execStata is False. Fix this.
May 2011: adding mode=None. if mode='compareMeans', make a table which compares means for two groups. oh .. .no.. I've made that into a separate function now, so I shouldn't have meddled here.
2013 April: Added substitutions option. Works, at least, for singlesurvey means.
"""
# Two modes: return stata code or run it standalone, ie loading a file.
execStata = dataFile is not None
#assert codebook==None # Not programmed yet. Dec 2009. But it should be. Then this does everything. Codebook one could just call the old function in codebook class...
assert tablename # Right now not sure how to generate automatic name for this table... Unless there's just one per latex file... So this must be specified.
from cpblUtilities import str2pathname
# Retired mar 2013: logname=str2pathname(logname)#''.join([cc for cc in logname if cc not in [' ']])
"""Old comments:
Actually, self.codebook or codebook can be used to specify a DTA file instead of a codebook class...
This is evolving again. If callStata==False, this returns Stata code to make a log file containing means USING THE DATASET CURRENTLY IN MEMORY.
If callStata==True, then DTA should be specified (if it doesn't exist yet, we will just skip this with a warning). Also in this case, weightIf can specify the syntax for weight and a condition to be used in calculating means -- ie it's in Stata syntax format. In this case, Stata will actually be called when this function executes and generate the means separately.
This function adds a table to the output LaTeX file.
So, this can be used either in a sort of automatic mode when closing a LaTeX file, e.g. based on the variables that have been used altogether, or it can be used to make an intermediate table of stats in the middle of a LaTeX file.
I NEED EXAMPLES HERE......... (no, see docs above (in latex class?))
"""
# Possibly specify the relevant survey and/or datafile:
if dataFile and self.mainDataFile and not dataFile == self.mainDataFile:
print 'CAUTION! Overriding %s with %s for main survey for stats' % (
self.mainDataFile, dataFile)
if not dataFile:
dataFile = self.mainDataFile
if mainSurvey == None:
mainSurvey = self.mainSurvey
if not dataFile and mainSurvey:
# This doesn't seem quite right -- I should be savvy enough here to load up whatever is available, ie incorporating info from the PDF codebook from Stats Can, not just what fits into a DTA file.
dataFile = WP + 'master' + mainSurvey
if codebook == None:
codebook = self.codebook # (was '==', a no-op comparison; assignment is intended here)
if codebook == None and mainSurvey:
codebook = stataCodebookClass(survey=mainSurvey)
# Choose variables to make stats of:
""" 2010Feb. If showVars is not specified, then we try all variables in the variablesUsed list, but note that the Stata code below is robust to each variable not existing. Only those that exist are sum'ed, mean'ed, or read.
"""
if showVars == None:
assert self.variablesUsed
showVars = [
vv for vv in uniqueInOrder(self.variablesUsed.split(' ')) if vv
]
if isinstance(showVars, str) or isinstance(showVars, unicode):
showVars = uniqueInOrder([vv for vv in showVars.split(' ') if vv])
#Require that variables are a unique list; otherwise order will be messed up? (Or could rely on auto ordering)
assert isinstance(showVars, list)
if not len(uniqueInOrder(showVars)) == len(showVars):
print(
" UHHHHH: You probably want to fix this so as not to screw up order..."
)
showVars = uniqueInOrder(showVars)
vString = ' '.join(showVars)
print ' To make stats for ' + vString
if ifNames:
assert isinstance(ifcondition, list)
assert len(ifNames) == len(ifcondition)
# Choose weight and condition:
if isinstance(ifcondition, str):
ifcondition = [ifcondition]
if not ifcondition == None:
weightsif = []
for oneifc in ifcondition:
assert isinstance(oneifc, str)
weightsif += [' if ' + oneifc]
else:
weightsif = [' ']
for iwif, xx in enumerate(weightsif):
if weightVar == None:
weightsif[iwif] += ' [w=weight] '
elif not weightVar: # To turn off weights, just send ''
weightsif[iwif] += ' '
else:
weightsif[iwif] += ' [w=' + weightVar + '] '
# Actually, we must have weights; don't allow unweighted results.. Why? I'm reversing this. Sometimes data are macro.
if 0:
assert weightsif[iwif].strip()
# Find a codebook, if available. (self.codebook can be a filename: dec 2009)
if codebook: # Why the foodle does this fail?.?.
assert isinstance(codebook, stataCodebookClass)
elif 0 and mainSurvey: # I've converted mainSurvey into dataFile, above, so I should not consider this option first, right??
print " SORRY!! I still don't know how to keep track well of codebook stuff based on survey name. So ignoring mainSurvey=" % mainSurvey
#codebook=stataCodebookClass(fromPDF)
else:
print "Why am I creating/using a full codebook from DTA for this file, when I may have specified particular if conditions for the statistics I want? Because I want to have the descriptions for these variables, even though my specific stats table call may contain if conditions, etc... (explanation Jan 2009)"
assert dataFile
codebook = stataCodebookClass(
fromDTA=dataFile,
recreate=self.recreateCodebook,
showVars=showVars) # Restrict to just the desired variables..
#elif self.codebook and isinstance(self.codebook,str):
# DTA=self.codebook
if not codebook:
foooooo # FIXME: deliberately(?) undefined name; raises NameError here, so the fallback print/return below are never reached
print(' NOOO descriptive statistics for you!!!!!!!!!!!! ')
return (' NOOO descriptive statistics for you!!!!!!!!!!!! ')
# Choose logfile to use (and/or read):
# This is a bit hacked together. Names might contain redundancies...
if dataFile:
pp, ff = os.path.split(dataFile)
if ff.lower().endswith('.dta.gz'):
ff = ff[0:-4]
else:
dataFile += '.dta.gz' # So that I can check its timestamp below
if tablename and not 'pre-2013 mode':
logfname = self.fpathname + '-summaryStatistics-' + tablename + ''
elif tablename:
logfname = None # Actually, after 2013, logfname is simply ignored
else:
logfname = 'summaryStatisticsFromStata_%s' % (ff.replace(
'.', '_')) #%os.path.splitext(ff)[0]
tablename = logfname
print 'Generating automatic descriptive statistics from %s using weight/condition "%s", and variables "%s" and condition %s into logfile %s.' % (
dataFile, str(weightsif), vString, str(ifcondition), logfname)
else:
print ' Producing Stata code to generate descriptive statistics using weight/condition "%s", and variables "%s".' % (
str(weightsif), vString)
if not '2013 and later mode':
logfname = self.fpathname + '-summaryStatistics-' + tablename + ''
else:
logfname = tablename
# Make the Stata code
outPrint = '\n'
sload = ''
if execStata:
sload = stataLoad(dataFile)
""" Goal here is to ensure that each variable exists before asking for a sum, and to ensure it's numeric before asking for a mean.
N.B. As for 2010Feb, I am not yet reading/using the mean calculation.
"""
tablenamel = self.generateLongTableName(
tablename, skipStata=False) #skipStata)
tableLogNameNoSuffix = defaults['paths']['stata']['tex'] + tablenamel
tableLogName = tableLogNameNoSuffix + '.log'
import time
tableLogNameWithDate = defaults['paths']['stata'][
'working'] + 'logs/' + tablenamel + time.strftime(
'%Y_%m_%d_%H%M%S_') + '.log'
#logfname (obsolete)
outPrint += """
capture noisily log close
log using %s,replace text """ % tableLogNameWithDate + """
""" + sload
if code:
outPrint += code['before']
for iowi, oneweightsif in enumerate(weightsif):
mweightsif = oneweightsif.replace('[w=', '[pw=')
oneifName = oneweightsif.replace('"', "'")
if ifNames:
oneifName = ifNames[iowi]
outPrint += ''.join([
"""
capture confirm variable """ + vv + """,exact
if _rc==0 {
di "*-=-=-=-=-=-=-=-= """ + vv + ' ' + oneifName + """"
sum """ + vv + ' ' + oneweightsif +
(defaults['server']['stataVersion'] == 'linux11'
) * """, nowrap""" + """
*~=~=~=~=~=~=~=~
return list
capture confirm numeric variable """ + vv + """,exact
if _rc==0 {
di "-~-~-~-~-~-~-~-~"
mean """ + vv + ' ' + mweightsif + """
test """ + vv + """
}
}
*x~x~x~x~x~x~x~x~x~
""" for vv in showVars if vv
])
outPrint += """
capture noisily log close
copy "%s" "%s", replace
* Succeeded / got to end.
""" % (tableLogNameWithDate, tableLogName)
if not execStata:
outPrint += """
"""
# Following section should probably be used/integrated somehow... mar2013
# if self.skipStataForCompletedTables and os.path.exists(tableLogName):
# if not skipStata:
# print ' Skipping Stata for %s because latex.skipStataForCompletedTables is set ON.... and this table is done.'%tablenamel
# outs+="""
# """
# skipStata=True
# Call Stata if indicated
# Parse output logfile if indicated
""" Now... I may or may not be calling Stata below. IF I am returning Stata code OR I don't need to recreate the log file, then I won't call Stata.
"""
#if not dataFile or (forceUpdate==False and (not fileOlderThan(tableLogName+'.log',dataFile) and 'Succeeded' in open(tableLogName+'.log','rt').read())):
if execStata and not forceUpdate and not fileOlderThan(
tableLogName, dataFile) and 'Succeeded' in open(tableLogName,
'rt').read():
print '--> Using EXISTING ' + tableLogName + ' for summary stats.\n If you want to refresh it, simply delete the file and rerun.'
elif execStata:
stataSystem(outPrint, filename=WP + 'do' + tablenamel)
if not os.path.exists(tableLogName):
print 'Seems to have failed. 023872398723!!!!'
assert ' ' not in tableLogName
if not execStata and not os.path.exists(tableLogName):
print " **** SKIPPING THE DESCRIPTIVE STATS IN %s BECAUSE YOU HAVEN'T RUN the regressions STATA YET to make it." % (
tableLogName)
return (outPrint)
sfields = [
['N', 'N', 'N'],
['sum_w', '', ''],
['mean', 'mean', ''],
['Var', '', ''],
['sd', 'sd', ''],
['min', 'min', ''],
['max', 'max', ''],
['sum', '', ''],
]
mfields = [
['mean', 'mean', ''],
['se', 'se', ''],
['cilb', 'cilb', ''],
['ciub', 'ciub', ''],
['Fchi2', '', ''],
['p', 'p', ''],
]
sstr=r"""
.?\s*.?-=-=-=-=-=-=-=-= (\S*) ([^\n]*)
.\s+sum (\S*) ([^\n]*)(.*?)
.\s+.~=~=~=~=~=~=~=~
.\s+return list[\n]*
scalars:
"""+ '\n'.join([r'\s+r.%s. =\s+(\S*)'%ff[0] for ff in sfields]) +r'.*?\n-~-~-~-~-~-~-~-~[^~]*\|[^~]*\|\s*([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\n' +\
r'[^~]*Prob >([\sFchi2]*)=\s*([^\s]+)\s+\}(.*?)x~x~x~x~x~x~x~x~x~'#'#(\s+.*?)~'#
print ' ********* Above probably needs nowrap since I added that in May 2010 (same with some other nowraps...) ... ONLY IF STATA 11.'
#stataLogFile_joinLines(tableLogName+'.log')
# Nov 2010: Horrid kludge..... I'M SURE THIS WILL CREATE BUGS. It should perhaps run only when the file has not already been fixed, but here it is applied unconditionally. Is this somehow necessary when I make this function produce code rather than run Stata?
stataLogFile_joinLines(tableLogName)
if 0: # 2012 and earlier version:
fa = re.findall(
sstr, ''.join(open(tableLogName, 'rt').readlines()), re.DOTALL)
# 2013 new version: for speed, split up the regexp task:
#logtxt=''.join(open(tableLogName+'.log','rt').readlines())
vsections = re.findall('-=-=-=-=-=-=-=-=(.*?)x~x~x~x~x~x~x~x~x~',
''.join(open(tableLogName, 'rt').readlines()),
re.DOTALL) # Fast
#vsections=re.split( '-=-=-=-=-=-=-=-=',''.join(open(tableLogName+'.log','rt').readlines()))[1:]
sstr = r""" (\S*) ([^\n]*)
.\s+sum (\S*) ([^\n]*)(.*?)
.\s+.~=~=~=~=~=~=~=~
.\s+return list[\n]*
scalars:
""" + '\n'.join(
[r'\s+r.%s. =\s+(\S*)' % ff[0] for ff in sfields]
) + r'.*?\n-~-~-~-~-~-~-~-~[^~]*\|[^~]*\|\s*([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\n' + r'[^~]*Prob >([\sFchi2]*)=\s*([^\s]+)\s+\}'
sstr2 = r""" (\S*) ([^\n]*)
.\s+sum (\S*) ([^\n]*)(.*?)
.\s+.~=~=~=~=~=~=~=~
.\s+return list[\n]*
scalars:
"""
descStats2 = []
ifOrder = []
for vsection in vsections:
piecesA = re.findall(sstr2 + """(.*?)\n-~-~-~-~-~-~-~-~\n(.*)""",
vsection, re.DOTALL)
if not len(piecesA) == 1:
print("""
===============================================
addDescriptiveStats: len(piecesA)=={} for {}
ABORTING ENTIRE DESCRIPTIVE STATS TABLE.
Most likely this will get resolved if you rerun
things once or twice.
===============================================
""".format(len(piecesA), vsection))
return ('')
#assert len(piecesA)==1
# Following is failing April2013 on a long variable.
# meanStr=r'[^~]*\|[^~]*\|\s*([^\s]*)\s+([^\s]*)\s+([^\s]*)\s+([^\s]*)\n' + r'[^~]*Prob >([\sFchi2]*)=\s*([^\s]+)\s+\}'
# Updated re string , April 2013, which doesn't fail on long var names
meanStr2013 = '\n\s*[^ ]+\s*\|\s*([^\s]*)\s*([^\s]*)\s*([^\s]*)\s*([^\s]*)\n-*\n .*?Prob >([\sFchi2]*)=\s*([^\s]+)\s+\}'
mDescstats = zip([mf[1] for mf in mfields],
re.findall(meanStr2013, piecesA[0][-1],
re.DOTALL)[0])
sStr = '\n'.join([r'\s+r.%s. =\s+(\S*)' % ff[0] for ff in sfields])
sDescstats = zip([mf[1] for mf in sfields],
re.findall(sStr, piecesA[0][5])[0])
##re.findall(sStr,pp,re.DOTALL)
vv = piecesA[0]
# July 2010: I'm worried that this is not backwards compatible.. can I make it so?
if ' if ' in vv[3]:
ifClause = vv[3].split(' if ')[1].split(' [')[0].strip(
) # This was my first try: this is the if clause taken straight from the Stata sum command. A nice way to do it, but if ifNames have been provided, we should really use the names here.
else:
ifClause = ''
ifClause = vv[1].strip(
) # New June 2010: The name of the if clause is available in the log file now. I suppose I could do an idiot check here to check for changes in the correspondence between ifs and ifnames (ie in caller vs log file). Not done.
descStats2 += [
dict(
[
[
'var',
vv[2],
],
['if', ifClause],
] + sDescstats + mDescstats
#[ [sfields[isf][1], vv[5+isf]] for isf in range(len(sfields))]
#[ [mfields[isf][1], vv[5+len(sfields)+isf]] for isf in range(len(mfields)) ]
)
]
if 0:
assert vv[5 + len(sfields) + len(mfields)] == '\n }\n\n*'
ifOrder += [ifClause]
ifOrder = uniqueInOrder(ifOrder)
if not vsections: # not fa and
print 'SUMMARY STATISTICS *****failed ************* regexp found nothing .... ', tableLogName
# Maybe there was an error in the log file... ie log file shouldn't be copied over unless successful...
fooo # FIXME: deliberately(?) undefined name; raises NameError when the regexp found nothing (see the message printed above)
## descStats={}
## for vv in fa:
## if vv[0] in descStats:
## continue # OBSELETE. descStats fails with multiple ifs.
## descStats[vv[0]]={}
## for isf in range(len(sfields)):
## descStats[vv[0]][sfields[isf][1]] = vv[3+isf]
## # Above fails if more than on if condition. June 2010
## del descStats
if 0:
descStats2 = []
ifOrder = []
for vv in fa:
# July 2010: I'm worried that this is not backwards compatible.. can I make it so?
if ' if ' in vv[3]:
ifClause = vv[3].split(' if ')[1].split(' [')[0].strip(
) # This was my first try: this the if clause taken straight from the Stata sum command. A nice way to do it, but if ifNames have been provided, we should really use the names here.
else:
ifClause = ''
ifClause = vv[1].strip(
) # New June 2010: The name of the if clause is available in the log file now. I suppose I could do an idiot check here to check for changes in the correspondence between ifs and ifnames (ie in caller vs log file). Not done.
descStats2+=[dict([['var',vv[2],],
['if',ifClause],
]+[ [sfields[isf][1], vv[5+isf]] for isf in range(len(sfields))] +\
[ [mfields[isf][1], vv[5+len(sfields)+isf]] for isf in range(len(mfields)) ]
)]
assert vv[5 + len(sfields) + len(mfields)] == '\n }\n\n*'
ifOrder += [ifClause]
ifOrder = uniqueInOrder(ifOrder)
# Check that we found all we were looking for? If we are supposed to make the log file as a standalone right here, and we aren't forced to remake it, then we *may not* have made it freshly, in which case it may be stale. If it might be stale (is missing some vars), offer to delete it.
notFound = [
vv for vv in showVars
if vv not in [dsv['var'] for dsv in descStats2]
]
if notFound and execStata and not forceUpdate:
if 'yes' == raw_input(
' ---> Some variables (%s) not found in the summary statistics log file. Shall I delete so it will be recreated next time? ([no]/yes)'
% str(uniqueInOrder(notFound))):
os.remove(tableLogName)
elif notFound:
print ' UN-FINISHED WARNING HERE!.... NEEDS DEVELOPMENT. SOME VARIABLES NOT FOUND IN LOG FILE: ', notFound
# Now a bit of a kludge. If we have codebook info, we want it. BUT our means may not be totally general. So let's make a deep copy of the codebook, if there is one, and put the descStats into it.
# June 2010: Further kludge. for multiple conditions, just update the codebook for each one, in turn. Order of conditions not yet fixed.
if not codebook:
codebook = stataCodebookClass()
from cpblUtilities.dictTrees import dictTree
byIf = dictTree(descStats2, ['if', 'var'])
if ifcondition == None:
ifcondition = []
print "Got %d sumstats from %s for %d requested variables for %d conditions." % (
len(descStats2[0].keys()) - 2, tableLogName, len(showVars),
len(ifcondition))
if ifNames and len(ifcondition) > 1:
assert ifOrder == ifNames
print ' -------> I am going to create a CSV which combines a bunch of ifconditions for the descriptive stats! (' + str(
ifNames) + ') '
allIfsCSV = ''
for iIf, anifcond in enumerate(ifOrder): #byIf.keys()):
codebookT = deepcopy(codebook)
for vv in byIf[anifcond]:
codebookT[vv] = codebookT.get(vv, {})
codebookT[vv]['sumstats'] = byIf[anifcond][vv][
0] #ds2#escStats2[vv]
# Now generate a table with these data.
comments = 'If some variables have missing values, try deleting %s.' % str2latex(
tableLogNameNoSuffix) + caption
if os.path.exists(tableLogNameNoSuffix + '.tex'):
import time
statsDate = time.ctime(
os.path.getmtime(tableLogNameNoSuffix + '.tex'))
comments += ' Stats carried out on ' + statsDate
if ifcondition:
comments += ' Samples here were taken with the following condition: "' + str2latex(
anifcond) + '"' #ifcondition)+'"'
codebookT.summaryStatisticsTable_singleSurvey(
texFilename=tableLogNameNoSuffix + '-%d.tex' % iIf,
latex=self,
showVars=showVars,
comments=comments,
substitutions=substitutions)
# 2010 Jun: Also create a .csv file *from* the .tex.
### from cpblUtilities import cpblTableToCSV
fout = open(tableLogNameNoSuffix + '-%d.csv' % iIf, 'wt')
tmpCSV = tableToTSV(tableLogNameNoSuffix + '-%d.tex' % iIf)
fout.write(tmpCSV)
fout.close()
if ifNames and len(ifcondition) > 1:
allIfsCSV += '\n\n' + anifcond + '\n\n' + tmpCSV
# So in June 2010 I used a sequence of these in csv format to concatenate a series of tables with different ifconditions.: See regressionsAknin... Actually, let's do it here..
if ifNames and len(ifcondition) > 1:
fout = open(tableLogNameNoSuffix + '-all.csv', 'wt')
fout.write(allIfsCSV)
fout.close()
# Actually, I may want a different kind of table: May 2011. This just compares the means for the different conditions. Codebook stuff not necessary.
if mode == 'compareMeans':
assert ifcondition
assert len(ifcondition) == 2
comments = 'If some variables have missing values, try deleting %s.' % str2latex(
tableLogName) + caption
if os.path.exists(tableLogNameNoSuffix + '.tex'):
import time
statsDate = time.ctime(
os.path.getmtime(tableLogNameNoSuffix + '.tex'))
comments += ' Stats carried out on ' + statsDate
headers = ['Variable', ifNames[0], ifNames[1], r'$p$(equal)']
body = []
from dictTrees import dictTree
statst = dictTree(tonumeric(descStats2),
['var', 'if']).singletLeavesAsDicts()
for vvv in showVars:
tE = statst[vvv][ifNames[0]]
xs1, ses1 = latexFormatEstimateWithPvalue(
[tE['mean'], tE['se']],
pval=tE['p'],
allowZeroSE=None,
tstat=False,
gray=False,
convertStrings=True,
threeSigDigs=None)
tE = statst[vvv][ifNames[1]]
xs2, ses2 = latexFormatEstimateWithPvalue(
[tE['mean'], tE['se']],
pval=tE['p'],
allowZeroSE=None,
tstat=False,
gray=False,
convertStrings=True,
threeSigDigs=None)
body += [
[vvv, xs1, xs2, '?'],
['', ses1, ses2, ''],
]
cpblTableStyC(
cpblTableElements(
body='\\\\ \n'.join(['&'.join(LL) for LL in body]) +
'\\\\ \n\\cline{1-\\ctNtabCols}\n ',
format=None,
firstPageHeader=' & '.join(headers) +
'\\\\ \n\\hline\\hline\n',
otherPageHeader=None,
tableTitle=None,
caption=None,
label=None,
ncols=None,
nrows=None,
footer=None,
tableName=None,
landscape=None),
filepath=tableLogNameNoSuffix + '-compareMeans-' +
str2pathname('-'.join(ifNames)) + '.tex',
masterLatexFile=self)
## # 2010 Jun: Also create a .csv file *from* the .tex.
## from cpblUtilities import cpblTableToCSV
## fout=open(tableLogName+'-%d.csv'%iIf,'wt')
## tmpCSV=cpblTableToCSV(tableLogName+'-%d.tex'%iIf)
## fout.write( tmpCSV)
## fout.close()
## if ifNames and len(ifcondition)>1:
## allIfsCSV+='\n\n'+anifcond+'\n\n'+tmpCSV
# And return the stata code if needed.
if execStata: # Stata was called, above, if needed, ie the stata executable code has already been used.
return () #descStats)
else: # We will run this later, when doing regressions...
return (outPrint) #,descStats)
################################################################
def closeAndCompile(self,
launch=True,
closeOnly=False,
compileOnly=False,
showStatsFor=None,
statsCondition=None,
noStats=False): # ,dataFile=None
"""
Dec 2009: New argument, DTA, allows specification of a stata file which can be used to produce means, etc. for the variables that have been used. So this is most useful when all the regressions are from the same dataset????
Well, for now this will use addDescriptiveStatistics, a new member function that I guess will look like the one already written for my codebook class??
To make this produce a table, ust use latexfile.updateSettings ... Can no longer specify a DTA file above.
When outside RDC, use noStats=True to skip the descriptive statistics.
What is "launch"? It seems not used.
"""
if statsCondition == None:
statsCondition = ' 1 '
# Add a table of descriptive statistics, if it makes any sense:
if not compileOnly and not noStats:
if self.codebook: # and isinstance(self.codebook,stataCodebookClass):
self.addDescriptiveStatistics(
codebook=self.codebook,
tablename='end', # ('logname' is no longer an accepted argument; the name is passed as tablename)
showVars=showStatsFor,
ifcondition=statsCondition)
#elif self.codebook and isinstance(self.codebook,str):
# self.addDescriptiveStatistics(DTA=DTA,logname='end')
elif self.mainDataFile:
self.addDescriptiveStatistics(
dataFile=self.mainDataFile,
tablename='end', # ('logname' is no longer an accepted argument; the name is passed as tablename)
showVars=showStatsFor,
ifcondition=statsCondition)
else:
#not DTA and not compileOnly and not self.mainDataFile:
print ' * SUGGESTION: closeAndCompile: Why have you not specified a codebook or DTA to generate a codebook, so as to get means of the variables used?'
if self.lfileTeXbody == '': # obsolete: or (self.lfileTeX==self.lfileTeX_original): # the part following 'or' is old/junk
print 'There is no LaTeX code accumulated to compile'
return (self.fpathname + '.tex')
if not compileOnly:
lfile = open(self.fpathname + '.tex', 'wt', encoding='utf-8')
lfile.write(self.lfileTeXwrapper[0] + self.lfileTeXbody +
self.lfileTeXwrapper[
1]) # Write entire, accumulated contents of file.
lfile.close()
print 'Completing LaTeX file %s...' % (self.fpathname + '.tex')
# And compile the latex output:
# Freakin' windows can't do an atomic rename when target exists... So first line is necessary for MS only
#if os.access(defaults['paths']['tex']+'tables-allCR.tex',os.F_OK):
# os.remove(defaults['paths']['tex']+'tables-allCR.tex')
# And now compile the LaTeX:
if not closeOnly:
doSystemLatex(
self.fname
) #launch=launch,tex=None,viewLatestSuccess=True,bgCompile=True)
return (self.fpathname + '.tex')
###########################################################################################
###
def getTeXcode(self):
###
#######################################################################################
return (self.lfileTeXbody)
###########################################################################################
###
def addCorrelationTable(self,
tablename,
corrvars,
ifClause=None,
threeSigDigs=False,
showSignificance=True,
variableOrder=None,
comments=None):
###
#######################################################################################
"""
2010 Nov.
Interface to mkcorr, which produces a tabular output of pairwise correlations.
So, the output here is in a designated file, not logged to Stata stdout.
mkcorr also logs descriptive stats! so I could use this as yet another way to make a descriptive stats table....
Works nicely. Test case by running this module/file.
Here are other options for correlation tables: [I bet I could have used the matrix one and my readmatrix function]
Publication-style correlation matrix (corrtab):
findit corrtab
help corrtab
corrtab read write math science, obs sig bonf vsort(read) format(%9.2f)
corrtab read write math science, cwd obs sig bonf vsort(read) format(%9.2f)
corrtab read write math science, spearman obs sig bonf vsort(read) format(%9.2f)
Note: pairwise (the equivalent of pwcorr in Stata) is the default unless cwd
(casewise, equivalent to corr in Stata) is not specified. Spearman may also be
specified. corrtab is designed for a maximum of eight variables.
Publication-style correlation matrix (makematrix) :
findit makematrix
help makematrix
makematrix, from(r(rho)) format(%9.2f): corr read-socst
makematrix, from(r(rho)) col(socst) format(%9.2f): corr read-socst
Jan 2011: Huh? But there is no "N" recorded in the log file for each correlation...
2018: If you have data in Python, just use the non-Stata version of this in my pandas_utils.py
"""
tablenamel = self.generateLongTableName(tablename)
tableLogName = defaults['paths']['stata']['tex'] + str2pathname(
tablenamel) + '.log' # Actually, a TSV file
tableFilePath = defaults['paths']['stata']['tex'] + str2pathname(
tablenamel) # for .tex , below
if os.path.exists(tableLogName):
#cells=tsvToDict(tableLogName)
cells = [
LL.strip('\n').split('\t')
for LL in open(tableLogName, 'rt').readlines()
]
assert all([cell[-1]=='' for cell in cells]) # Bug in Stata's mkcorr? extra column. arghhhh
header = cells[0][:-1]
varsOrder = header[5:] # 1:5 are mean, s.d., min, max.
np = (len(cells) - 1) / 2
assert np == len(varsOrder)
pcorrs = {}
for iv, vva in enumerate(varsOrder):
for jv, vvb in enumerate(
varsOrder[:1 + iv]): # Skip upper right hand triangle
pcorrs[vva] = pcorrs.get(vva, {})
pcorrs[vvb] = pcorrs.get(vvb, {})
r, p = cells[iv * 2 + 1][jv + 5], cells[iv * 2 + 2][jv + 5]
from cpblUtilities import NaN
fp = NaN
if p.startswith('('):
if p == '(0.00000)':
p = r'{\coefpSmall{$<$10$^{-5}$}}'
fp = 0.00001
elif p == '(1.00000)':
p = 1
fp = NaN
else:
fp = float(p[1:-1])
p = chooseSFormat(
p[1:-1],
convertStrings=True,
threeSigDigs=threeSigDigs,
conditionalWrapper=[r'\coefp{', '}']
) #,lowCutoff=None,lowCutoffOOM=True,convertStrings=False,highCutoff=1e6,noTeX=False,threeSigDigs=False,se=None):
else:
assert p in ['']
# Following plan fails if I want to be able to manipulate order of variables later:
#if r=='1.00000': # Since using upper right triangle, rather than lower left, can hide diagonal here.
# r=''
r = chooseSFormat(
r, convertStrings=True, threeSigDigs=threeSigDigs)
if p and showSignificance:
significanceString = ([' '] + [
tt[0] for tt in significanceTable
if fp <= tt[2] / 100.0
])[-1]
r = significanceString + r + '}' * (
not not significanceString)
pcorrs[vva][vvb] = dict(b=r, p=p)
# Must have symmetric matrix if I am able to change order of variables...dict(b='',p='') # This hides lower left triangle. If you did these the opposite way around, it would hide diagonal AND upper right triangle.
pcorrs[vvb][vva] = dict(b=r, p=p)
debugprint(vva, vvb, iv, jv, '---------', pcorrs[vva][vvb])
# NOW PRODUCE OUTPUT TABLE
# I'll have to revise this a bunch if I want to put the descriptive statistics back in.
if variableOrder:
varsOrder = orderListByRule(varsOrder, variableOrder)
body = ''
# I want values show in top left triangle, so first var in varsOrder has easiest to read corrs.
for iv, vv in enumerate(varsOrder[:-1]):
assert '#' not in vv
tworows = formatPairedRow([[
r'\sltrheadername{%s}' % str2latex(
substitutedNames(vv, self.substitutions))
] + [
pcorrs[vv][vvb]['b'] * (ivvb >= iv)
for ivvb, vvb in enumerate(varsOrder[1:])
], [''] + [
pcorrs[vv][vvb]['p'] * (ivvb >= iv)
for ivvb, vvb in enumerate(varsOrder[1:])
]])
body+= '\t& '.join([cc for cc in tworows[0]])+'\\\\ \n'+r'\showSEs{'+\
'\t& '.join([cc for cc in tworows[1]]) +' \\\\ }{}\n'
body += r'\hline ' + '\n' # Separate the coefs from extralines..
headersLine = '\t&'.join([''] + [
r'\begin{sideways}\sltcheadername{%s}\end{sideways}'
% substitutedNames(vv, self.substitutions)
for vv in varsOrder[1:]
]) + '\\\\ \n' + r'\hline'
comments = [comments, ''][int(comments is None)]
includeTeX, callerTeX = cpblTableStyC(
cpblTableElements(
body=body,
cformat='c' * (len(varsOrder) + 2),
firstPageHeader=r'\ctSubsequentHeaders \hline ',
otherPageHeader=headersLine,
tableTitle=None,
caption=r'[Correlations among key variables]{Correlations among key variables. '
+ comments + '}',
label='tab:Correlations',
ncols=None,
nrows=None,
footer=None,
tableName=tablename,
landscape=None),
filepath=tableFilePath)
# Should I have just written a composeLatexOtherTable() for above?
#if transposedChoice:
# tableFilePath=tableFilePath+'-transposed'
fout = open(tableFilePath + '.tex', 'wt', encoding='utf-8')
fout.write(includeTeX)
fout.close()
# 2010 Jan: Also create a .csv file *from* the .tex.
###from cpblUtilities import cpblTableToCSV
fout = open(tableFilePath + '-tex.csv', 'wt')
fout.write(tableToTSV(includeTeX))
fout.close()
self.append(r'\newpage ' + callerTeX.replace(
'PUT-TABLETEX-FILEPATH-HERE',
tableFilePath.replace(defaults['paths']['tex'], r'\texdocs '))
+ '\n\n')
# Also create a standalone PDF of this table
cpblTable_to_PDF(tableFilePath, aftertabulartex = r' \\ {\footnotesize\cpblColourLegend} ')
return ("""
* BEGIN mkcorr: produce a correlation table in output/TSV format.
mkcorr {corrvars} {ifClause},log({tableLogName}) sig cdec(5) replace means
* END mkcorr
""".format(corrvars = corrvars, tableLogName = tableLogName, ifClause = '' if ifClause is None else ifClause if ifClause.strip().startswith('if') else ' if '+ifClause))
###########################################################################################
###
def test_addCorrelationTable(latex=None):
###
#######################################################################################
assert latex
return ("""
gzuse macroNov2010,clear
""" + latex.addCorrelationTable(
'testMkCorr', 'gwp_beta*', ifClause=None) + """
""")
###########################################################################################
###
def writeMultiSurveyCodebookTable(self,
surveys,
vars=None,
findAllCommonVars=False,
tablename='tmpxxx',
maxVars=1e6):
###
#######################################################################################
"""
May 2011. Sorry if this already exists somehow.
So, it's easy to add more kinds of output to this. Just make many.
For instance, I want to make one with means in place of check marks.
findAllCommonVars: a special mode, alternative to specifying a subset of vars explicitly. ... not done yet.
If all vars allowed, let's just sort them by (a) how many surveys they're in, and (b) alphabet. [(b) not done]
"""
# Load up surveys
cb = {}
vvs = []
for survey in surveys:
cb[survey] = stataCodebookClass(survey=survey)
for vv in cb[survey]:
cb[survey][vv].update(dict(survey=survey, vv=vv))
vvs += [cb[survey][vv]]
# So, thanks to labelling each entry with survey, I can reorder:
from dictTrees import dictTree
byVV = dictTree(vvs, ['vv', 'survey'])
vvk = byVV.keys()
# Sort by number of surveys that include the variable
vvk.sort(reverse=True, key=lambda x: len(byVV[x]))
# Sort by whether it's derived or raw variable
vvk.sort(
reverse=True, key=lambda x: 'rawname' in byVV[x].values()[0][0])
if 0 and len(vvk) > maxVars:
vvk = vvk[:maxVars]
from cpblUtilities import cpblTableStyC
tablenamel = self.generateLongTableName(tablename)
tableLogName = defaults['paths']['stata']['tex'] + str2pathname(
tablenamel) + '.log' # Actually, a TSV file
tableFilePath = defaults['paths']['stata']['tex'] + str2pathname(
tablenamel) # for .tex , below
cpblTableStyC(
cpblTableElements(
body='\n'.join([
str2latex(avv.replace('_', '-')) + '\t& ' + '\t& '.join([
r'$\checkmark$' * (asurvey in byVV[avv])
for asurvey in surveys
]) + r'\\' for avv in vvk
]),
cformat=None,
firstPageHeader=' & '.join([''] + surveys) + r'\\',
otherPageHeader=None,
tableTitle=None,
caption=None,
label=None,
ncols=len(surveys) + 1,
nrows=len(vvk),
footer=None,
tableName=tablename,
landscape=None),
filepath=paths['tex'] + str2pathname(tablename) + '.tex',
masterLatexFile=self)
return ()
###########################################################################################
###
def semiRollingRegression(self,
statamodel=None,
rollingCovariates=None,
rollingX=None,
ordinalRollingX=None,
tablename=None,
variableOrder=None,
rollCovColourLookup=None,
includeConstant=True,
showConstant=True,
suppressFromPlot=None,
nSegments=None,
alsoDoFullyPiecewise=True,
weight=None):
###
#######################################################################################
"""
Sample usage:
stataout=stata.doHeader+stata.stataLoad(WP+'usavignettes')
outdict=latex.semiRollingRegression(statamodel='reg Answer '+' '.join(nonrollingvars),rollingCovariates=['VLNINC'], rollingX='VLNINC',ordinalRollingX=None,tablename='rollingtest',variableOrder=None,rollCovColourLookup=None,includeConstant=True,suppressFromPlot=None,nSegments=5,alsoDoFullyPiecewise=True,weight=None)
stataout+=outdict['statatext']
That is, I often use the same variable as the rollingCovariate and the rollingX.
N.B. Return value is a dict: return({'statatext':outStata,'models':lmodels,'figures':figs})
statamodel: This is a Stata regression call, but with the rollingCovariates variables missing from the call.. They will be added in.
rollingCovariates: a list of variable names: # These are the covariates of SWBs whose coefficients we may want to estimate as a function of xrollings. You now have the option of setting these in a flag for each model (e.g. via the Stata source format), rather than globally as a parameter. I hesitate to add this feature, since it makes things more complex...
rollingX: the rolling variable. The sample is split into sections based on an ordinal version of this. This is the variable that should appear on the abscissa in the plot.
ordinalRollingX:
For now, let's assume that an ordinalRollingX exists, and the data are loaded. ie an ordinal version of the rolling variable, rollingX, exists.
[may 2012: meaning I have to supply both rollingX and ordinalRollingX?? I've now tried to make it create one if it's not given. But not tested properly yet.]
variableOrder: need not include the rollingCovariates, of course.
rollCovColourLookup=a dict that gives a plotting color for each rollingCovariate
showConstant=True: set this to False to suppress the constant in the plot that's created.
TO DO: There should
Constants.:
. do i need constants for each quantile too?
Constants: It's now an optional argument. One basically always needs to include "nocons" as an option to your regression call, but I'm not enforcing this (.). If you don't set includeConstant=False and provide your own, this function will generate one per segment and include its constant in the rollingCovariates.
Oct 2011: Oh. I cannot use beta with this, as it will normalize across the divisions in a very weird way. Well... actually, there should be the same number of respondents, roughly, in each. But certainly using constants with nSegments>1 and beta gives garbage / nonsense...
So if you want to calculate betas, you should do a series of separate regressions. [oct 2011]
May 2012: Great idea is to Add in the option to also do fully-piecewise/full-rolling (not semi-rolling) regression, as a robustness test. I seem to have written this "rollingRegression" in pystata. It's not yet integrated into pystataLatex. Do so...
"""
DVNN = 'dependent var, its name'
#assert not weight is None # Until 2015, you need to specify weight is False or give a weight, since this started out assuming a weight.
outStata = ''
xrollings = [
['lnGDPpc', 'gdprank'], # country mean income
[
'lnadjHHincome', 'tgrank'
], # absolute own income (log or not are same if get rid of zeros?)
['gwp_rankAdjHHincome', 'crank'], # Rank within own country
[
'grank', 'grank'
], # rank in globe. ..BUt there's no difference between grank and tgrank.?
][1:2]
# Start by processing the statamodel commands, in case we need to check it to determine rollingCovariates.
dummyLatex = latexRegressionFile('dummyLRF')
dummymodels = dummyLatex.str2models(statamodel)
if suppressFromPlot is None:
suppressFromPlot = []
if showConstant is False:
suppressFromPlot += ['qconstant']
# Determine rollingCovariates:
allRollingCovariates = [
xx
for xx in flattenList(
[
dgetget(mm, ['flags', 'rollingCovariates'], '').split(' ')
for mm in dummymodels
],
unique=True) if xx
] # List of all covariates in flags.
assert rollingCovariates is None or not any(allRollingCovariates)
assert rollingCovariates is not None or all([
dgetget(mm, ['flags', 'rollingCovariates'], '')
for mm in dummymodels
])
if rollingCovariates is not None:
allRollingCovariates = rollingCovariates
assert isinstance(allRollingCovariates,
list) # this should be earlier..
# Determine kernel width (well, number of segments): (uh, well, I am not allowing this to be set per model by flag, yet. So just use the function parameter):
if nSegments is None:
nSegments = 10
leftlim = arange(0, 1.0 + 1.0 / nSegments, 1.0 / nSegments)
# # rollingCovariates=['lnadjHHincome','gwp_rankAdjHHincome','dZeroishIncome','grank'] #'lnGDPpc',
covRollingNames = {}
outModel = includeConstant * """
capture noisily drop qconstant
gen qconstant=1
"""
if rollingX is not None and ordinalRollingX is None:
outModel += """
* We'll need to know quantiles of the x rolling variable:
capture noisily drop tmpOrd%(rx)s
capture noisily drop _ord%(rx)s
egen tmpOrd%(rx)s=rank(%(rx)s)
sum tmpOrd%(rx)s
gen _ord%(rx)s=(tmpOrd%(rx)s-r(min))/(r(max)-r(min))
""" % {
'rx': rollingX
}
ordinalRollingX = '_ord' + rollingX
ifs = [
' %s >= %f & %s < %f ' %
(ordinalRollingX, leftlim[iL], ordinalRollingX, leftlim[iL + 1])
for iL in range(len(leftlim) - 1)
]
print 'There are %d ifs for ns=%d' % (len(ifs), nSegments)
for yrolling in allRollingCovariates + includeConstant * [
'qconstant'
]: # Prepare each of the rolling independent variables by making piecewise versions of them.
covRollingNames[yrolling] = []
# Create a series of dummies
assert len(leftlim) < 100
outModel += """
* Let's recreate these
capture noisily drop """ + yrolling + '_q*' + """
"""
for iL in range(len(leftlim) - 1):
outModel += """
gen """ + yrolling + '_q%02d%02d' % (
iL, len(leftlim) - 1
) + ' = ' + yrolling + ' * (' + ifs[iL] + """)
"""
covRollingNames[yrolling].append(yrolling + '_q%02d%02d' % (
iL, len(leftlim) - 1))
#covRollingNames.get(yrolling,'')+' '+yrolling+'_q%02d%02d'%(iL,len(leftlim)-1))+' '
#yRollingNames.sort()
allRollingCovNames = flattenList(
covRollingNames.values()
) # So this is just a list of all the _q variables names, in order passed.
models = []
# I think I should do the betas in a fully-segmented regressions, not the partial-rolling. Otherwise the normalisation is weird, no? Or just multiply all the betas by 10?
lmodels = self.str2models(
outModel + """
""" + statamodel + """
""",
before="loadData"
        )  # CAREFUL: is there something other than loadData I can say here?
# Add in the rolling covariates, the versions with piecewise nonzero values:
for mmm in lmodels:
if rollingCovariates is None: # I will probably just make it so flags always get set, above.
mmm['model'] = ' ' + ' '.join(
flattenList([
covRollingNames[vvv]
for vvv in dgetget(mmm, [
'flags', 'rollingCovariates'
], ' ').split(' ') + includeConstant * ['qconstant']
])) + ' ' + mmm['model']
else:
mmm['model'] = ' ' + ' '.join(
allRollingCovNames) + ' ' + mmm['model']
# So the following ifs are in terms of ordinalRollingX, but the means are of rollingX:
mmm['code']['sumsAfter'] = generate_postEstimate_sums_by_condition(
rollingX, ifs) #'tgrank crank'
outStata += self.regTable(
tablename,
lmodels,
returnModels=True,
variableOrder=(variableOrder if variableOrder is not None else [])
+ allRollingCovNames,
transposed=False,
comments=r"""
Rolling $x$ (quantile) variable: %s%s in %d segments.
""" % (rollingX, (not rollingX == ordinalRollingX) *
(' (%s)' % ordinalRollingX), nSegments))
# What if "beta" was used in the regression call?
combineVars = [[avvv, 'z_' + avvv] for avvv in allRollingCovNames]
if rollCovColourLookup is None: # Then assign it automagically? Using rainbow?
ccc = getIndexedColormap(None, len(rollingCovariates))
rollCovColourLookup = dict(
[[avvv, ccc[ia]] for ia, avvv in enumerate(rollingCovariates)])
if includeConstant and 'qconstant' not in rollCovColourLookup:
rollCovColourLookup['qconstant'] = 'k'
if alsoDoFullyPiecewise:
pass
# Now plot them?
sef = 1.96
plt.close('all')
figs = []
for ifigure, mm in enumerate(lmodels):
if 'estcoefs' not in mm:
continue
# Oh, this is ugly. why is beta not a 'special' flag or etc?
if '$\\beta$ coefs' in mm['textralines']:
mm['flags']['beta'] = True
print 'implement a beta flag in regtable *********** TO DO (to clean up)'
NoWay_CANNOT_DO_BETA_WITH_ROLLING_
# Find the x-values of the xrolling variable, corresponding to the if groups.
ifs = sorted(mm['subSums'].keys())
mm['x' + rollingX] = array(
[mm['subSums'][anif][rollingX]['mean'] for anif in ifs])
mm['xstep' + rollingX] = array([
xx
for xx in flatten([leftlim[0]] + [[xx, xx] for xx in leftlim[
1:-1]] + [leftlim[-1]])
])
mm['xstep' + rollingX] = array([
ii
for ii in flatten([[
mm['subSums'][anif][rollingX]['min'],
mm['subSums'][anif][rollingX]['max']
] for anif in ifs])
])
#mm['xstep'+rollingX]=array(flattenList([leftlim[0]]+ [[xx,xx] for xx in leftlim[1:-1]] , leftlim[-1]]))
mm['sex' + rollingX] = array(
[mm['subSums'][anif][rollingX]['seMean'] for anif in ifs])
plt.figure(120 + ifigure)
plt.clf()
ax = plt.subplot(111)
figs += [plt.gcf()]
# And extract the coefficients determined for this SWB measure and this xrolling variable:
for icv, cvv in enumerate([
vvv
for vvv in allRollingCovariates + includeConstant *
['qconstant']
if vvv not in suppressFromPlot and any([
vv for vv in mm['estcoefs'].keys()
if vv.startswith(vvv)
])
]):
# for ixv,xv in enumerate(mm['subSums'][ifs[0]].keys()):
cvvn = substitutedNames(cvv)
cvvs = sorted(
[vvv for vvv in mm['estcoefs'] if vvv.startswith(cvv)])
# No. Above is dangerous. It means that some might be missing. Let's force it to be what we expect. Oh, but this will cause trouble if the Stata log is out of date? too bad. Use NaNs:
cvvs = covRollingNames[cvv]
if not len(ifs) == len(cvvs): # Need to re-run Stata..
continue
assert len(ifs) == len(cvvs)
if 0:
mm['b' + cvv] = array(
[mm['estcoefs'][vv]['b'] for vv in cvvs])
mm['bstep' + cvv] = array(
flattenList([[
mm['estcoefs'][vv]['b'], mm['estcoefs'][vv]['b']
] for vv in cvvs]))
mm['sebstep' + cvv] = array(
flattenList([[
mm['estcoefs'][vv]['se'], mm['estcoefs'][vv]['se']
] for vv in cvvs]))
mm['seb' + cvv] = array(
[mm['estcoefs'][vv]['se'] for vv in cvvs])
mm['b' + cvv] = array(
[dgetget(mm, ['estcoefs', vv, 'b'], NaN) for vv in cvvs])
mm['bstep' + cvv] = array(
flattenList([
[
dgetget(mm, ['estcoefs', vv, 'b'], NaN), dgetget(
mm, ['estcoefs', vv, 'b'], NaN)
] for vv in cvvs
])) #mm['estcoefs'][vv]['b'],mm['estcoefs'][vv]['b']
mm['se_bstep' + cvv] = array(
flattenList([[
dgetget(mm, ['estcoefs', vv, 'se'], NaN), dgetget(
mm, ['estcoefs', vv, 'se'], NaN)
] for vv in cvvs]))
mm['se_b' + cvv] = array(
[dgetget(mm, ['estcoefs', vv, 'se'], NaN) for vv in cvvs])
df = pd.DataFrame(
dict([[avn, mm[avn]]
for avn in ['x' + rollingX, 'b' + cvv, 'se_b' + cvv]
]))
# Above should really be merged across covariates... etc
print('Plotting %s now with envelope...?' % cvv)
dfPlotWithEnvelope(
df,
'x' + rollingX,
'b' + cvv,
color=rollCovColourLookup[cvv],
label=cvvn,
labelson='patch',
ax=ax)
# plotWithEnvelope(mm['x'+rollingX], mm['b'+cvv],mm['b'+cvv]-sef*mm['seb'+cvv],mm['b'+cvv]+sef*mm['seb'+cvv],linestyle='-',linecolor=rollCovColourLookup[cvv],facecolor=rollCovColourLookup[cvv],lineLabel=cvvn, laxSkipNaNsXY=True)
if 0: #for
plotWithEnvelope(
mm['xstep' + rollingX],
mm['bstep' + cvv],
mm['bstep' + cvv] - sef * mm['sebstep' + cvv],
mm['bstep' + cvv] + sef * mm['sebstep' + cvv],
linestyle='--',
linecolor=rollCovColourLookup[cvv],
facecolor=rollCovColourLookup[cvv],
lineLabel=cvvn)
#x,y,yLow,yHigh,linestyle='-',linecolor=None,facecolor=None,alpha=0.5,label=None,lineLabel=None,patchLabel=None,laxSkipNaNsSE=False,laxSkipNaNsXY=False,skipZeroSE=False,ax=None,laxFail=True):
#envelopePatch=plt.fill_between(mm['x'+ordinalRollingX],mm['b'+cvv]-sef*mm['seb'+cvv],mm['b'+cvv]+sef*mm['seb'+cvv],facecolor=qColours[icv],alpha=.5)#,linewidth=0,label=patchLabel)# edgecolor=None, does not work.. So use line
#plt.plot(mm['x'+ordinalRollingX],mm['b'+cvv],linestyle=qColours[icv]+'.-',label=cvv)#'resid'+' loess fit'+resid)
# yerr=sef*mm['seb'+cvv],
# If aborted above loop due to Stata needing to be re-run:
plt.xlabel(substitutedNames(rollingX))
plt.ylabel('Raw coefficients (for %s)' %
substitutedNames(mm['depvar']))
if 'beta' in mm.get('flags', []):
plt.ylabel(r'Standardized $\beta$ coefficients (for %s)' %
mm['depvar'])
NoWay_CANNOT_DO_BETA_WITH_ROLLING_
plt.plot(plt.xlim(), [0, 0], 'k:', zorder=-100)
# This should be removed.. Not general. Can return figs, so no need to do this here.
if 'lnGDPpc' in mm['estcoefs']:
y, yse = mm['estcoefs']['lnGDPpc']['b'], mm['estcoefs'][
'lnGDPpc']['se']
plotWithEnvelope(
plt.xlim(), [y, y], [y - sef * yse, y - sef * yse],
[y + sef * yse, y + sef * yse],
linestyle='-',
linecolor='k',
facecolor='k',
lineLabel=substitutedNames('lnGDPpc'))
plt.text(plt.xlim()[1], y, r'$b_{\log(GDP/cap)}$')
comments = r'$b_{\log(GDP/cap)}=%.02f\pm%.02f$' % (
mm['estcoefs']['lnGDPpc']['b'], mm['estcoefs']['lnGDPpc']['se']
) if 'lnGDPpc' in mm['estcoefs'] else ''
comments += {1.96: r'95\% c.i.'}[sef]
transLegend(comments=comments) #
pltcomments = 'Coefficients in estimate of ' + substitutedNames(
mm['depvar']) + ' (with ' + ';'.join(
mm.get('flags', {}).keys()).replace(
' ',
'') + ')' + mm['texModelNum'] + mm['tableName'][:4]
self.saveAndIncludeFig(
figname=('TMP%s-%02d' % (tablename, ifigure) + '-'.join(
[rollingX, mm['name']] + mm.get('flags', {}).keys())
).replace(' ', ''),
caption=pltcomments)
return ({'statatext': outStata, 'models': lmodels, 'figures': figs})
###########################################################################################
###
def coefficientsOnIndicators(self,
statamodel=None,
indicators=None,
rollingXvar=None,
tablename=None,
variableOrder=None,
rollCovColourLookup=None,
includeConstant=True,
nSegments=None):
###
#######################################################################################
"""
see regressionsDaily.
Not written yet.
"""
def substitutedNames(self, names):
"""
See pystata's substituted names!
"""
return (substitutedNames(names, self.substitutions))
def plotAgeCoefficients(self, models):
"""
overlay plots of the age coefficient predicted components for all models passed.
2014: Sanity check: does this make sense to keep/use? Is it really generally useful? I don't think I like quartics anymore. Just use dummies for age ranges if you really want to take into account age effects.
"""
from pylab import arange, plot, figure, gcf, clf, show
from cpblUtilities import transLegend
figure(345)
clf()
for mm in models:
if 'estcoefs' not in mm:
continue
if 'beta' in mm['regoptions'] or dgetget(
mm, ['textralines', '$\\beta$ coefs'],
'') in ['\\YesMark']:
continue
ageCoefs = [
dgetget(mm, ['estcoefs', cc, 'b'], 0)
for cc in 'age100', 'agesq100', 'agecu100', 'agefo100'
]
age = arange(0, 100)
plot(
age,
            sum([ageCoefs[ii] * (age / 100.0)**(ii + 1) for ii in range(4)]),  # exponents 1..4 for age100, agesq100, agecu100, agefo100
hold=True,
label=mm['texModelNum'] + ': ' + str2latex(mm['model']))
transLegend()
return (gcf())
def models2df(self, models):
# Use the function defined outside the latex class?
return (models2df(models, latex=self))
def oaxacaThreeWays(
self,
tablename,
model,
groupConditions,
groupNames,
datafile=None,
preamble=None, # This nearly-must include stataLoad(datafile)
referenceModel=None,
referenceModelName=None,
savedModel=None,
oaxacaOptions=None,
dlist=None,
rerun=True,
substitutions=None,
commonOrder=True,
skipStata=False,
figsize=None):
# For example usage of this function, see regressionsAboriginals2015;
import time
if substitutions is None: substitutions = self.substitutions
# Choose an output do-file and log-file name
tablenamel = self.generateLongTableName(tablename, skipStata=skipStata)
tableLogName = defaults['paths']['stata']['tex'] + tablenamel
tableLogNameWithDate = defaults['paths']['stata'][
'working'] + 'logs/' + tablenamel + time.strftime(
'%Y_%m_%d_%H%M%S_') + '.log'
preamble = '' if preamble is None else preamble
if self.skipStataForCompletedTables and os.path.exists(tableLogName +
'.log'):
if not skipStata:
print ' Skipping Stata for %s because latex.skipStataForCompletedTables is set ON!!!! and this table is done.' % tablenamel
                skipStata = True
        # Generate the Stata code. As a matter of practice, we should always include the file loading INSIDE the logfile (i.e. the caller should specify datafile)
statacode = """
log using """ + tableLogName + """.log, text replace
"""
statacode += '' if datafile is None else stataLoad(datafile)
statacode += '' if preamble is None else preamble
statacode += oaxacaThreeWays_generate(
model=model,
groupConditions=groupConditions,
groupNames=groupNames,
referenceModel=referenceModel,
referenceModelName=referenceModelName,
oaxacaOptions=oaxacaOptions,
dlist=dlist, )
statacode += '\n log close \n'
if os.path.exists(tableLogName + '.log'):
models = oaxacaThreeWays_parse(
tableLogName, substitutions=substitutions)
else:
print(' Did not find Blinder-Oaxaca log file for %s: rerun Stata.'
% tableLogName)
models = []
# NOW MAKE A PLOT OF THE FINDINGS: SUBSAMPLE DIFFERENCE ACCOUNTING
for imodel, model in enumerate(models):
depvar = model['depvar']
subsamp = model['subsamp']
basecase = model['basecase']
tooSmallToPlot = {subsamp: []}
from cifarColours import colours
import pylab as plt
from cpblUtilities import figureFontSetup, categoryBarPlot
import numpy as np
plt.ioff()
figureFontSetup()
plt.figure(217, figsize=figsize)
plt.clf()
"""
What is the logic here? I want to
- eliminate "constant".
- order variables according to magnitude of effect, except if showvars specified.
- include the grouped variables and not their contents
"""
plotvars = [
vv for vv in model['diffpredictions_se'][subsamp].keys()
if not vv in [model['depvar'], 'constant', 'Total']
]
plotvars.sort(
key=lambda x: abs(model['diffpredictions'][subsamp][x])
) #abs(array([model['diffpredictions'][subsamp][vv] for vv in plotvars])))
plotvars.reverse()
rhsvars = plotvars
cutoffTooSmallToPlot = .01 # If you change this, change the %.2f below, too
tooSmallToPlot[subsamp] += [
vv for vv in rhsvars
if (abs(model['diffpredictions'][subsamp][vv]) + 2 * abs(
model['diffpredictions_se'][subsamp][vv])
) / abs(model['diffLHS']) < cutoffTooSmallToPlot and vv
not in ['constant'] and vv in plotvars
]
omittedComments = ''
if tooSmallToPlot[subsamp]:
omittedComments = ' The following variables are not shown because their contribution was estimated with 95\\%% confidence to be less than %.2f of the predicted difference: %s. ' % (
cutoffTooSmallToPlot, '; '.join(tooSmallToPlot[subsamp]))
plotvars = [
cv for cv in plotvars if cv not in tooSmallToPlot[subsamp]
]
            if commonOrder and imodel > 0:
plotvars = lastPlotVars
else:
lastPlotVars = plotvars
labelLoc = 'eitherSideOfZero'
labelLoc = None #['left','right'][int(model['diffLHS'][subsamp]>0)]
DV = substitutedNames(model['depvar'], substitutions)
cbph = categoryBarPlot(
np.array([r'$\Delta$' + DV, r'predicted $\Delta$' + DV] +
plotvars),
np.array([
model['diffLHS'],
model['diffpredictions'][subsamp]['Total']
] + [model['diffpredictions'][subsamp][vv]
for vv in plotvars]),
labelLoc=labelLoc,
sortDecreasing=False,
yerr=np.array([
model['diffLHS_se'],
model['diffpredictions_se'][subsamp]['Total']
] + [
model['diffpredictions_se'][subsamp][vv] for vv in plotvars
]),
barColour={
r'$\Delta$' + DV: colours['darkgreen'],
r'predicted $\Delta$' + DV: colours['green']
})
#plt.figlegend(yerr,['SS','ww'],'lower left')
assert DV in [
'swl', 'SWL', 'ladder', '{\\em nation:}~ladder', 'lifeToday'
] # model['depvar'] needs to be in the two lookup tables in following two lines:
shortLHSname = {
'SWL': 'SWL',
'swl': 'SWL',
'lifeToday': 'life today',
'ladder': 'ladder',
'{\\em nation:}~ladder': 'ladder'
}[DV]
longLHSname = {
'SWL': 'satisfaction with life (SWL)',
'swl': 'satisfaction with life (SWL)',
'lifeToday': 'life today',
'ladder': 'Cantril ladder',
'{\\em nation:}~ladder': 'Cantril ladder'
}[DV]
# Could put here translations
xxx = plt.legend(cbph['bars'][0:3], [
r'$\Delta$' + shortLHSname + ' observed', r'$\Delta$' +
shortLHSname + ' explained', 'explained contribution'
], {True: 'lower left',
False: 'lower right'}[abs(plt.xlim()[0]) > abs(plt.xlim()[1])])
xxx.get_frame().set_alpha(0.5)
            # Could you explain the following if??
if 0 and plotparams.get('showTitle', False) == True:
plt.title(model['name'] + ': ' + subsamp +
': differences from ' + basecase)
plt.title(
"Accounting for %s's life satisfaction difference from %s"
% (subsamp, basecase))
title = ''
caption = ''
else:
title = r"Accounting for %s's life satisfaction difference from %s \ctDraftComment{(%s) col (%d)}" % (
subsamp, basecase, model['name'], model['modelNum'])
caption = title
plt.xlabel(r'$\Delta$ %s' % shortLHSname)
#plt.subtitle('Error bars show two standard error widths')
plt.xlabel('Mean and explained difference in ' + longLHSname)
plt.ylim(-1, len(plotvars) +
3) # Give just one bar space on top and bottom.
#plt.ylim(np.array(plt.ylim())+np.array([-1,1]))
            if commonOrder and imodel > 0:
plt.xlim(lastPlotXlim)
else:
lastPlotXlim = plt.xlim()
# Save without titles:
imageFN = paths['graphics'] + os.path.split(
tableLogName)[1] + '-using-%s%d' % (
str2pathname(model['basecase']), imodel)
needReplacePlot = fileOlderThan(imageFN + '.png',
tableLogName + '.log')
self.saveAndIncludeFig(
imageFN,
caption=None,
texwidth=None,
title=None, # It seems title is not used!
onlyPNG=False,
rcparams=None,
transparent=False,
ifany=None,
fig=None,
skipIfExists=not needReplacePlot and
self.skipSavingExistingFigures,
pauseForMissing=True)
# And store all this so that the caller could recreate a custom version of the plot (or else allow passing of plot parameters.. or a function for plotting...? Maybe if a function is offered, call that here...? So, if regTable returns models as well as TeX code, this can go back to caller. (pass pointer?)
if 'accountingPlot' not in model:
model['accountingPlot'] = {}
model['accountingPlot'][subsamp] = {
'labels': np.array(rhsvars + ['predicted ' + DV, DV]),
'y': np.array([
model['diffpredictions'][subsamp][vv] for vv in rhsvars
] + [
model['diffpredictions'][subsamp]['Total'],
model['diffLHS']
]),
'yerr': np.array([
model['diffpredictions_se'][subsamp][vv] for vv in rhsvars
] + [
model['diffpredictions_se'][subsamp]['Total'],
model['diffLHS_se']
])
}
return (statacode * (not skipStata))
################################################################################################
################################################################################################
################################################################################################
if __name__ == '__main__':
################################################################################################
################################################################################################
print ' DEMO MODE!!!!!!!!! for pystata.latexRegressions ... '
sVersion, rVersion, dVersion = 'CPBLtesting', 'XXXX', 'testing'
"""
from recodeGallup import gDataVersion, pathList, gVersion
from regressionsGallup import standardSubstitutions
runBatchSet(
sVersion,
rVersion, [testFunctions],
dVersion='testingOnly-forCPBLStataLR',
substitutions=standardSubstitutions)
"""
| gpl-2.0 |
dsquareindia/scikit-learn | benchmarks/bench_plot_randomized_svd.py | 57 | 17557 | """
Benchmarks on the power iterations phase in randomized SVD.
We test on various synthetic and real datasets the effect of increasing
the number of power iterations in terms of quality of approximation
and running time. A number greater than 0 should help with noisy matrices,
which are characterized by a slow spectral decay.
We test several policies for normalizing the power iterations. Normalization
is crucial to avoid numerical issues.
The quality of the approximation is measured by the spectral norm discrepancy
between the original input matrix and the reconstructed one (by multiplying
the randomized_svd's outputs). The spectral norm is always equivalent to the
largest singular value of a matrix. (3) justifies this choice. However, one can
notice in these experiments that Frobenius and spectral norms behave
very similarly in a qualitative sense. Therefore, we suggest to run these
benchmarks with `enable_spectral_norm = False`, as Frobenius' is MUCH faster to
compute.
The benchmarks follow.
(a) plot: time vs norm, varying number of power iterations
data: many datasets
goal: compare normalization policies and study how the number of power
iterations affect time and norm
(b) plot: n_iter vs norm, varying rank of data and number of components for
randomized_SVD
data: low-rank matrices on which we control the rank
goal: study whether the rank of the matrix and the number of components
extracted by randomized SVD affect "the optimal" number of power iterations
(c) plot: time vs norm, varying datasets
data: many datasets
goal: compare default configurations
We compare the following algorithms:
- randomized_svd(..., power_iteration_normalizer='none')
- randomized_svd(..., power_iteration_normalizer='LU')
- randomized_svd(..., power_iteration_normalizer='QR')
- randomized_svd(..., power_iteration_normalizer='auto')
- fbpca.pca() from https://github.com/facebook/fbpca (if installed)
Conclusion
----------
- n_iter=2 appears to be a good default value
- power_iteration_normalizer='none' is OK if n_iter is small, otherwise LU
gives similar errors to QR but is cheaper. That's what 'auto' implements.
References
----------
(1) Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
(2) A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
(3) An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
# Author: Giorgio Patrini
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import gc
import pickle
from time import time
from collections import defaultdict
import os.path
from sklearn.utils import gen_batches
from sklearn.utils.validation import check_random_state
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import (make_low_rank_matrix,
make_sparse_uncorrelated)
from sklearn.datasets import (fetch_lfw_people,
fetch_mldata,
fetch_20newsgroups_vectorized,
fetch_olivetti_faces,
fetch_rcv1)
try:
import fbpca
fbpca_available = True
except ImportError:
fbpca_available = False
# If this is enabled, tests are much slower and will crash with the large data
enable_spectral_norm = False
# TODO: compute approximate spectral norms with the power method as in
# Estimating the largest eigenvalues by the power and Lanczos methods with
# a random start, Jacek Kuczynski and Henryk Wozniakowski, SIAM Journal on
# Matrix Analysis and Applications, 13 (4): 1094-1122, 1992.
# This approximation is a very fast estimate of the spectral norm, but depends
# on starting random vectors.
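# A rough sketch of such a power-method estimate (not used by the benchmarks below; the
# function name, defaults and fixed iteration count are illustrative, not from the reference):
def _approx_spectral_norm_power_method(A, n_iter=20, random_state=0):
    """Estimate ||A||_2 (the largest singular value) by power iteration on A.T @ A."""
    rng = np.random.RandomState(random_state)
    v = rng.normal(size=A.shape[1])
    v /= np.linalg.norm(v)
    for _ in range(n_iter):
        w = A.T.dot(A.dot(v))          # one power-iteration step on the Gram matrix
        w_norm = np.linalg.norm(w)
        if w_norm == 0:                # A is (numerically) zero
            return 0.0
        v = w / w_norm
    return np.linalg.norm(A.dot(v))    # ||A v|| ~= sigma_max for the converged v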
# Determine when to switch to batch computation for matrix norms,
# in case the reconstructed (dense) matrix is too large
MAX_MEMORY = np.int(2e9)
# The following datasets can be downloaded manually from:
# CIFAR 10: http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
# SVHN: http://ufldl.stanford.edu/housenumbers/train_32x32.mat
CIFAR_FOLDER = "./cifar-10-batches-py/"
SVHN_FOLDER = "./SVHN/"
datasets = ['low rank matrix', 'lfw_people', 'olivetti_faces', '20newsgroups',
'MNIST original', 'CIFAR', 'a1a', 'SVHN', 'uncorrelated matrix']
big_sparse_datasets = ['big sparse matrix', 'rcv1']
def unpickle(file_name):
with open(file_name, 'rb') as fo:
return pickle.load(fo, encoding='latin1')["data"]
def handle_missing_dataset(file_folder):
if not os.path.isdir(file_folder):
print("%s file folder not found. Test skipped." % file_folder)
return 0
def get_data(dataset_name):
print("Getting dataset: %s" % dataset_name)
if dataset_name == 'lfw_people':
X = fetch_lfw_people().data
elif dataset_name == '20newsgroups':
X = fetch_20newsgroups_vectorized().data[:, :100000]
elif dataset_name == 'olivetti_faces':
X = fetch_olivetti_faces().data
elif dataset_name == 'rcv1':
X = fetch_rcv1().data
elif dataset_name == 'CIFAR':
if handle_missing_dataset(CIFAR_FOLDER) == "skip":
return
X1 = [unpickle("%sdata_batch_%d" % (CIFAR_FOLDER, i + 1))
for i in range(5)]
X = np.vstack(X1)
del X1
elif dataset_name == 'SVHN':
if handle_missing_dataset(SVHN_FOLDER) == 0:
return
X1 = sp.io.loadmat("%strain_32x32.mat" % SVHN_FOLDER)['X']
X2 = [X1[:, :, :, i].reshape(32 * 32 * 3) for i in range(X1.shape[3])]
X = np.vstack(X2)
del X1
del X2
elif dataset_name == 'low rank matrix':
X = make_low_rank_matrix(n_samples=500, n_features=np.int(1e4),
effective_rank=100, tail_strength=.5,
random_state=random_state)
elif dataset_name == 'uncorrelated matrix':
X, _ = make_sparse_uncorrelated(n_samples=500, n_features=10000,
random_state=random_state)
elif dataset_name == 'big sparse matrix':
sparsity = np.int(1e6)
size = np.int(1e6)
small_size = np.int(1e4)
data = np.random.normal(0, 1, np.int(sparsity/10))
data = np.repeat(data, 10)
row = np.random.uniform(0, small_size, sparsity)
col = np.random.uniform(0, small_size, sparsity)
X = sp.sparse.csr_matrix((data, (row, col)), shape=(size, small_size))
del data
del row
del col
else:
X = fetch_mldata(dataset_name).data
return X
def plot_time_vs_s(time, norm, point_labels, title):
plt.figure()
colors = ['g', 'b', 'y']
for i, l in enumerate(sorted(norm.keys())):
        if l != "fbpca":
plt.plot(time[l], norm[l], label=l, marker='o', c=colors.pop())
else:
plt.plot(time[l], norm[l], label=l, marker='^', c='red')
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -20),
textcoords='offset points', ha='right', va='bottom')
plt.legend(loc="upper right")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def scatter_time_vs_s(time, norm, point_labels, title):
plt.figure()
size = 100
for i, l in enumerate(sorted(norm.keys())):
        if l != "fbpca":
plt.scatter(time[l], norm[l], label=l, marker='o', c='b', s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -80),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
va='bottom', size=11, rotation=90)
else:
plt.scatter(time[l], norm[l], label=l, marker='^', c='red', s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, 30),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
va='bottom', size=11, rotation=90)
plt.legend(loc="best")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def plot_power_iter_vs_s(power_iter, s, title):
plt.figure()
for l in sorted(s.keys()):
plt.plot(power_iter, s[l], label=l, marker='o')
plt.legend(loc="lower right", prop={'size': 10})
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("n_iter")
def svd_timing(X, n_comps, n_iter, n_oversamples,
power_iteration_normalizer='auto', method=None):
"""
Measure time for decomposition
"""
print("... running SVD ...")
    if method != 'fbpca':
gc.collect()
t0 = time()
U, mu, V = randomized_svd(X, n_comps, n_oversamples, n_iter,
power_iteration_normalizer,
random_state=random_state, transpose=False)
call_time = time() - t0
else:
gc.collect()
t0 = time()
# There is a different convention for l here
U, mu, V = fbpca.pca(X, n_comps, raw=True, n_iter=n_iter,
l=n_oversamples+n_comps)
call_time = time() - t0
return U, mu, V, call_time
def norm_diff(A, norm=2, msg=True):
"""
Compute the norm diff with the original matrix, when randomized
SVD is called with *params.
norm: 2 => spectral; 'fro' => Frobenius
"""
if msg:
print("... computing %s norm ..." % norm)
if norm == 2:
# s = sp.linalg.norm(A, ord=2) # slow
value = sp.sparse.linalg.svds(A, k=1, return_singular_vectors=False)
else:
if sp.sparse.issparse(A):
value = sp.sparse.linalg.norm(A, ord=norm)
else:
value = sp.linalg.norm(A, ord=norm)
return value
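# A hedged usage sketch (toy data, not part of the benchmark runs):
#   X = np.random.RandomState(0).randn(100, 80)
#   U, s, V = randomized_svd(X, 10, random_state=0)
#   rel_fro_err = (norm_diff(X - U.dot(np.diag(s)).dot(V), norm='fro')
#                  / norm_diff(X, norm='fro', msg=False))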
def scalable_frobenius_norm_discrepancy(X, U, s, V):
# if the input is not too big, just call scipy
if X.shape[0] * X.shape[1] < MAX_MEMORY:
A = X - U.dot(np.diag(s).dot(V))
return norm_diff(A, norm='fro')
print("... computing fro norm by batches...")
batch_size = 1000
Vhat = np.diag(s).dot(V)
cum_norm = .0
for batch in gen_batches(X.shape[0], batch_size):
M = X[batch, :] - U[batch, :].dot(Vhat)
        cum_norm += norm_diff(M, norm='fro', msg=False) ** 2  # squared: Frobenius norms add in quadrature
return np.sqrt(cum_norm)
def bench_a(X, dataset_name, power_iter, n_oversamples, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
X_spectral_norm = norm_diff(X, norm=2, msg=False)
all_frobenius = defaultdict(list)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
for pi in power_iter:
for pm in ['none', 'LU', 'QR']:
print("n_iter = %d on sklearn - %s" % (pi, pm))
U, s, V, time = svd_timing(X, n_comps, n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples)
label = "sklearn - %s" % pm
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
print("n_iter = %d on fbca" % (pi))
U, s, V, time = svd_timing(X, n_comps, n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples,
method='fbpca')
label = "fbpca"
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_spectral, power_iter, title)
title = "%s: Frobenius norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_frobenius, power_iter, title)
def bench_b(power_list):
n_samples, n_features = 1000, 10000
data_params = {'n_samples': n_samples, 'n_features': n_features,
'tail_strength': .7, 'random_state': random_state}
dataset_name = "low rank matrix %d x %d" % (n_samples, n_features)
ranks = [10, 50, 100]
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for rank in ranks:
X = make_low_rank_matrix(effective_rank=rank, **data_params)
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
for n_comp in [np.int(rank/2), rank, rank*2]:
label = "rank=%d, n_comp=%d" % (rank, n_comp)
print(label)
for pi in power_list:
U, s, V, _ = svd_timing(X, n_comp, n_iter=pi, n_oversamples=2,
power_iteration_normalizer='LU')
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs n power iteration" % (dataset_name)
        plot_power_iter_vs_s(power_list, all_spectral, title)
title = "%s: Frobenius norm diff vs n power iteration" % (dataset_name)
    plot_power_iter_vs_s(power_list, all_frobenius, title)
def bench_c(datasets, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
n_comps = np.minimum(n_comps, np.min(X.shape))
label = "sklearn"
print("%s %d x %d - %s" %
(dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=10,
method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
label = "fbpca"
print("%s %d x %d - %s" %
(dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=2,
method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if len(all_time) == 0:
raise ValueError("No tests ran. Aborting.")
if enable_spectral_norm:
title = "normalized spectral norm diff vs running time"
scatter_time_vs_s(all_time, all_spectral, datasets, title)
title = "normalized Frobenius norm diff vs running time"
scatter_time_vs_s(all_time, all_frobenius, datasets, title)
if __name__ == '__main__':
random_state = check_random_state(1234)
power_iter = np.linspace(0, 6, 7, dtype=int)
n_comps = 50
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
print(" >>>>>> Benching sklearn and fbpca on %s %d x %d" %
(dataset_name, X.shape[0], X.shape[1]))
bench_a(X, dataset_name, power_iter, n_oversamples=2,
n_comps=np.minimum(n_comps, np.min(X.shape)))
print(" >>>>>> Benching on simulated low rank matrix with variable rank")
bench_b(power_iter)
print(" >>>>>> Benching sklearn and fbpca default configurations")
bench_c(datasets + big_sparse_datasets, n_comps)
plt.show()
| bsd-3-clause |
ellisdg/3DUnetCNN | unet3d/predict.py | 1 | 39483 | import os
import time
import numpy as np
import nibabel as nib
import pandas as pd
from keras.models import load_model
from nilearn.image import resample_to_img, new_img_like
from .utils.utils import (load_json, get_nibabel_data, one_hot_image_to_label_map,
break_down_volume_into_half_size_volumes, combine_half_size_volumes)
from .utils.sequences import SubjectPredictionSequence
from .utils.pytorch.dataset import HCPSubjectDataset
from .utils.hcp import new_cifti_scalar_like, get_metric_data
from .utils.filenames import generate_hcp_filenames, load_subject_ids
from .utils.augment import generate_permutation_keys, permute_data, reverse_permute_data
def predict_data_loader(model, data_loader):
import torch
predictions = list()
with torch.no_grad():
for batch_x in data_loader:
predictions.extend(model(batch_x).cpu().numpy())
return np.asarray(predictions)
def predict_generator(model, generator, use_multiprocessing=False, n_workers=1, max_queue_size=8, verbose=1,
package="keras", batch_size=1):
if package == "pytorch":
from torch.utils.data import DataLoader
loader = DataLoader(generator, batch_size=batch_size, shuffle=False, num_workers=n_workers)
if verbose:
print("Loader: ", len(loader), " Batch_size: ", batch_size, " Dataset: ", len(generator))
return predict_data_loader(model, loader)
else:
return model.predict_generator(generator, use_multiprocessing=use_multiprocessing, workers=n_workers,
max_queue_size=max_queue_size, verbose=verbose)
def predict_subject(model, feature_filename, surface_filenames, surface_names, metric_names, output_filename,
reference_filename, batch_size=50, window=(64, 64, 64), flip=False, spacing=(1, 1, 1),
use_multiprocessing=False, workers=1, max_queue_size=10, overwrite=True,
generator=SubjectPredictionSequence, package="keras"):
if overwrite or not os.path.exists(output_filename):
generator = generator(feature_filename=feature_filename, surface_filenames=surface_filenames,
surface_names=surface_names, batch_size=batch_size, window=window, flip=flip,
spacing=spacing, reference_metric_filename=reference_filename)
prediction = predict_generator(model, generator, use_multiprocessing=use_multiprocessing, n_workers=workers,
max_queue_size=max_queue_size, verbose=1, batch_size=batch_size,
package=package)
output_image = new_cifti_scalar_like(np.moveaxis(prediction, 1, 0), scalar_names=metric_names,
structure_names=surface_names,
reference_cifti=nib.load(reference_filename), almost_equals_decimals=0)
output_image.to_filename(output_filename)
def make_predictions(config_filename, model_filename, output_directory='./', n_subjects=None, shuffle=False,
key='validation_filenames', use_multiprocessing=False, n_workers=1, max_queue_size=5,
batch_size=50, overwrite=True, single_subject=None, output_task_name=None, package="keras",
directory="./", n_gpus=1):
output_directory = os.path.abspath(output_directory)
config = load_json(config_filename)
if key not in config:
name = key.split("_")[0]
if name not in config:
load_subject_ids(config)
config[key] = generate_hcp_filenames(directory,
config['surface_basename_template'],
config['target_basenames'],
config['feature_basenames'],
config[name],
config['hemispheres'])
filenames = config[key]
model_basename = os.path.basename(model_filename).replace(".h5", "")
if "package" in config and config["package"] == "pytorch":
generator = HCPSubjectDataset
package = "pytorch"
else:
generator = SubjectPredictionSequence
if "model_kwargs" in config:
model_kwargs = config["model_kwargs"]
else:
model_kwargs = dict()
if "batch_size" in config:
batch_size = config["batch_size"]
if single_subject is None:
if package == "pytorch":
from unet3d.models.pytorch.build import build_or_load_model
model = build_or_load_model(model_filename=model_filename, model_name=config["model_name"],
n_features=config["n_features"], n_outputs=config["n_outputs"],
n_gpus=n_gpus, **model_kwargs)
else:
model = load_model(model_filename)
else:
model = None
if n_subjects is not None:
if shuffle:
np.random.shuffle(filenames)
filenames = filenames[:n_subjects]
for feature_filename, surface_filenames, metric_filenames, subject_id in filenames:
if single_subject is None or subject_id == single_subject:
if model is None:
if package == "pytorch":
from unet3d.models.pytorch.build import build_or_load_model
model = build_or_load_model(model_filename=model_filename, model_name=config["model_name"],
n_features=config["n_features"], n_outputs=config["n_outputs"],
n_gpus=n_gpus, **model_kwargs)
else:
model = load_model(model_filename)
if output_task_name is None:
_output_task_name = os.path.basename(metric_filenames[0]).split(".")[0]
if len(metric_filenames) > 1:
_output_task_name = "_".join(
_output_task_name.split("_")[:2] + ["ALL47"] + _output_task_name.split("_")[3:])
else:
_output_task_name = output_task_name
output_basename = "{task}-{model}_prediction.dscalar.nii".format(model=model_basename,
task=_output_task_name)
output_filename = os.path.join(output_directory, output_basename)
subject_metric_names = list()
for metric_list in config["metric_names"]:
for metric_name in metric_list:
subject_metric_names.append(metric_name.format(subject_id))
predict_subject(model,
feature_filename,
surface_filenames,
config['surface_names'],
subject_metric_names,
output_filename=output_filename,
batch_size=batch_size,
window=np.asarray(config['window']),
spacing=np.asarray(config['spacing']),
flip=False,
overwrite=overwrite,
use_multiprocessing=use_multiprocessing,
workers=n_workers,
max_queue_size=max_queue_size,
reference_filename=metric_filenames[0],
package=package,
generator=generator)
def predict_local_subject(model, feature_filename, surface_filename, batch_size=50, window=(64, 64, 64),
spacing=(1, 1, 1), flip=False, use_multiprocessing=False, workers=1, max_queue_size=10, ):
generator = SubjectPredictionSequence(feature_filename=feature_filename, surface_filename=surface_filename,
surface_name=None, batch_size=batch_size, window=window,
flip=flip, spacing=spacing)
return model.predict_generator(generator, use_multiprocessing=use_multiprocessing, workers=workers,
max_queue_size=max_queue_size, verbose=1)
def whole_brain_scalar_predictions(model_filename, subject_ids, hcp_dir, output_dir, hemispheres, feature_basenames,
surface_basename_template, target_basenames, model_name, n_outputs, n_features,
window, criterion_name, metric_names, surface_names, reference, package="keras",
n_gpus=1, n_workers=1, batch_size=1, model_kwargs=None):
from .scripts.train import generate_hcp_filenames
filenames = generate_hcp_filenames(directory=hcp_dir, surface_basename_template=surface_basename_template,
target_basenames=target_basenames, feature_basenames=feature_basenames,
subject_ids=subject_ids, hemispheres=hemispheres)
if package == "pytorch":
pytorch_whole_brain_scalar_predictions(model_filename=model_filename,
model_name=model_name,
n_outputs=n_outputs,
n_features=n_features,
filenames=filenames,
prediction_dir=output_dir,
window=window,
criterion_name=criterion_name,
metric_names=metric_names,
surface_names=surface_names,
reference=reference,
n_gpus=n_gpus,
n_workers=n_workers,
batch_size=batch_size,
model_kwargs=model_kwargs)
else:
raise ValueError("Predictions not yet implemented for {}".format(package))
def volumetric_predictions(model_filename, filenames, prediction_dir, model_name, n_features, window,
criterion_name, package="keras", n_gpus=1, n_workers=1, batch_size=1,
model_kwargs=None, n_outputs=None, sequence_kwargs=None, sequence=None,
metric_names=None, evaluate_predictions=False, interpolation="linear",
resample_predictions=True, output_template=None, segmentation=False,
segmentation_labels=None, threshold=0.5, sum_then_threshold=True, label_hierarchy=None,
write_input_images=False):
if package == "pytorch":
pytorch_volumetric_predictions(model_filename=model_filename,
model_name=model_name,
n_outputs=n_outputs,
n_features=n_features,
filenames=filenames,
prediction_dir=prediction_dir,
window=window,
criterion_name=criterion_name,
n_gpus=n_gpus,
n_workers=n_workers,
batch_size=batch_size,
model_kwargs=model_kwargs,
sequence_kwargs=sequence_kwargs,
sequence=sequence,
metric_names=metric_names,
evaluate_predictions=evaluate_predictions,
interpolation=interpolation,
resample_predictions=resample_predictions,
output_template=output_template,
segmentation=segmentation,
segmentation_labels=segmentation_labels,
threshold=threshold,
sum_then_threshold=sum_then_threshold,
label_hierarchy=label_hierarchy,
write_input_images=write_input_images)
else:
raise ValueError("Predictions not yet implemented for {}".format(package))
def pytorch_whole_brain_scalar_predictions(model_filename, model_name, n_outputs, n_features, filenames, window,
criterion_name, metric_names, surface_names, prediction_dir=None,
output_csv=None, reference=None, n_gpus=1, n_workers=1, batch_size=1,
model_kwargs=None):
from .train.pytorch import load_criterion
from unet3d.models.pytorch.build import build_or_load_model
from .utils.pytorch.dataset import WholeBrainCIFTI2DenseScalarDataset
import torch
from torch.utils.data import DataLoader
if model_kwargs is None:
model_kwargs = dict()
model = build_or_load_model(model_name=model_name, model_filename=model_filename, n_outputs=n_outputs,
n_features=n_features, n_gpus=n_gpus, **model_kwargs)
model.eval()
basename = os.path.basename(model_filename).split(".")[0]
if prediction_dir and not output_csv:
output_csv = os.path.join(prediction_dir, str(basename) + "_prediction_scores.csv")
dataset = WholeBrainCIFTI2DenseScalarDataset(filenames=filenames,
window=window,
metric_names=metric_names,
surface_names=surface_names,
spacing=None,
batch_size=1)
criterion = load_criterion(criterion_name, n_gpus=n_gpus)
loader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=n_workers)
results = list()
print("Loader: ", len(loader), " Batch_size: ", batch_size, " Dataset: ", len(dataset))
with torch.no_grad():
if reference is not None:
reference = torch.from_numpy(reference).unsqueeze(0)
if n_gpus > 0:
reference = reference.cuda()
for batch_idx, (x, y) in enumerate(loader):
print("Batch: ", batch_idx)
if n_gpus > 0:
x = x.cuda()
y = y.cuda()
pred_y = model(x)
if type(pred_y) == tuple:
pred_y = pred_y[0] # This is a hack to ignore other outputs that are used only for training
for i in range(batch_size):
row = list()
idx = (batch_idx * batch_size) + i
print("i: ", i, " idx: ", idx)
if idx >= len(dataset):
break
args = dataset.filenames[idx]
subject_id = args[-1]
row.append(subject_id)
idx_score = criterion(pred_y[i].unsqueeze(0), y[i].unsqueeze(0)).item()
row.append(idx_score)
if reference is not None:
idx_ref_score = criterion(reference.reshape(y[i].unsqueeze(0).shape),
y[i].unsqueeze(0)).item()
row.append(idx_ref_score)
results.append(row)
save_predictions(prediction=pred_y[i].cpu().numpy(), args=args, basename=basename,
metric_names=metric_names, surface_names=surface_names, prediction_dir=prediction_dir)
if output_csv is not None:
columns = ["subject_id", criterion_name]
if reference is not None:
columns.append("reference_" + criterion_name)
pd.DataFrame(results, columns=columns).to_csv(output_csv)
def load_volumetric_model(model_name, model_filename, n_outputs, n_features, n_gpus, strict, **kwargs):
from unet3d.models.pytorch.build import build_or_load_model
model = build_or_load_model(model_name=model_name, model_filename=model_filename, n_outputs=n_outputs,
n_features=n_features, n_gpus=n_gpus, strict=strict, **kwargs)
model.eval()
return model
def load_volumetric_sequence(sequence, sequence_kwargs, filenames, window, spacing, metric_names, batch_size=1):
from .utils.pytorch.dataset import AEDataset
if sequence is None:
sequence = AEDataset
if sequence_kwargs is None:
sequence_kwargs = dict()
dataset = sequence(filenames=filenames, window=window, spacing=spacing, batch_size=batch_size,
metric_names=metric_names,
**sequence_kwargs)
return dataset
def load_volumetric_model_and_dataset(model_name, model_filename, model_kwargs, n_outputs, n_features,
strict_model_loading, n_gpus, sequence, sequence_kwargs, filenames, window,
spacing, metric_names):
if model_kwargs is None:
model_kwargs = dict()
model = load_volumetric_model(model_name=model_name, model_filename=model_filename, n_outputs=n_outputs,
n_features=n_features, strict=strict_model_loading, n_gpus=n_gpus, **model_kwargs)
dataset = load_volumetric_sequence(sequence, sequence_kwargs, filenames, window, spacing, metric_names,
batch_size=1)
basename = os.path.basename(model_filename).split(".")[0]
return model, dataset, basename
def load_images_from_dataset(dataset, idx, resample_predictions):
if resample_predictions:
x_image, ref_image = dataset.get_feature_image(idx, return_unmodified=True)
else:
x_image = dataset.get_feature_image(idx)
ref_image = None
return x_image, ref_image
def get_feature_filename_and_subject_id(dataset, idx, verbose=False):
epoch_filenames = dataset.epoch_filenames[idx]
x_filename = epoch_filenames[dataset.feature_index]
if verbose:
print("Reading:", x_filename)
subject_id = epoch_filenames[-1]
return x_filename, subject_id
def pytorch_predict_batch(batch_x, model, n_gpus):
if n_gpus > 0:
batch_x = batch_x.cuda()
if hasattr(model, "test"):
pred_x = model.test(batch_x)
else:
pred_x = model(batch_x)
return pred_x.cpu()
def prediction_to_image(data, input_image, reference_image=None, interpolation="linear", segmentation=False,
segmentation_labels=None, threshold=0.5, sum_then_threshold=False, label_hierarchy=False):
pred_image = new_img_like(input_image, data=data)
if reference_image is not None:
pred_image = resample_to_img(pred_image, reference_image,
interpolation=interpolation)
if segmentation:
pred_image = one_hot_image_to_label_map(pred_image,
labels=segmentation_labels,
threshold=threshold,
sum_then_threshold=sum_then_threshold,
label_hierarchy=label_hierarchy)
return pred_image
def write_prediction_image_to_file(pred_image, output_template, subject_id, x_filename, prediction_dir, basename,
verbose=False):
if output_template is None:
while type(x_filename) == list:
x_filename = x_filename[0]
pred_filename = os.path.join(prediction_dir,
"_".join([subject_id,
basename,
os.path.basename(x_filename)]))
else:
pred_filename = os.path.join(prediction_dir,
output_template.format(subject=subject_id))
if verbose:
print("Writing:", pred_filename)
pred_image.to_filename(pred_filename)
def pytorch_predict_batch_array(model, batch, n_gpus=1):
import torch
batch_x = torch.tensor(np.moveaxis(np.asarray(batch), -1, 1)).float()
pred_x = pytorch_predict_batch(batch_x, model, n_gpus)
return np.moveaxis(pred_x.numpy(), 1, -1)
def predict_volumetric_batch(model, batch, batch_references, batch_subjects, batch_filenames,
basename, prediction_dir,
segmentation, output_template, n_gpus, verbose, threshold, interpolation,
segmentation_labels, sum_then_threshold, label_hierarchy, write_input_image=False):
pred_x = pytorch_predict_batch_array(model, batch, n_gpus=n_gpus)
for batch_idx in range(len(batch)):
pred_image = prediction_to_image(pred_x[batch_idx].squeeze(), input_image=batch_references[batch_idx][0],
reference_image=batch_references[batch_idx][1], interpolation=interpolation,
segmentation=segmentation, segmentation_labels=segmentation_labels,
threshold=threshold, sum_then_threshold=sum_then_threshold,
label_hierarchy=label_hierarchy)
write_prediction_image_to_file(pred_image, output_template,
subject_id=batch_subjects[batch_idx],
x_filename=batch_filenames[batch_idx],
prediction_dir=prediction_dir,
basename=basename,
verbose=verbose)
if write_input_image:
write_prediction_image_to_file(batch_references[batch_idx][0], output_template=output_template,
subject_id=batch_subjects[batch_idx] + "_input",
x_filename=batch_filenames[batch_idx],
prediction_dir=prediction_dir,
basename=basename,
verbose=verbose)
def pytorch_volumetric_predictions(model_filename, model_name, n_features, filenames, window,
criterion_name, prediction_dir=None, output_csv=None, reference=None,
n_gpus=1, n_workers=1, batch_size=1, model_kwargs=None, n_outputs=None,
sequence_kwargs=None, spacing=None, sequence=None,
strict_model_loading=True, metric_names=None,
print_prediction_time=True, verbose=True,
evaluate_predictions=False, resample_predictions=False, interpolation="linear",
output_template=None, segmentation=False, segmentation_labels=None,
sum_then_threshold=True, threshold=0.7, label_hierarchy=None,
write_input_images=False):
import torch
# from .train.pytorch import load_criterion
model, dataset, basename = load_volumetric_model_and_dataset(model_name, model_filename, model_kwargs, n_outputs,
n_features, strict_model_loading, n_gpus, sequence,
sequence_kwargs, filenames, window, spacing,
metric_names)
# criterion = load_criterion(criterion_name, n_gpus=n_gpus)
results = list()
print("Dataset: ", len(dataset))
with torch.no_grad():
batch = list()
batch_references = list()
batch_subjects = list()
batch_filenames = list()
for idx in range(len(dataset)):
x_filename, subject_id = get_feature_filename_and_subject_id(dataset, idx, verbose=verbose)
x_image, ref_image = load_images_from_dataset(dataset, idx, resample_predictions)
batch.append(get_nibabel_data(x_image))
batch_references.append((x_image, ref_image))
batch_subjects.append(subject_id)
batch_filenames.append(x_filename)
if len(batch) >= batch_size or idx == (len(dataset) - 1):
predict_volumetric_batch(model=model, batch=batch, batch_references=batch_references,
batch_subjects=batch_subjects, batch_filenames=batch_filenames,
basename=basename, prediction_dir=prediction_dir,
segmentation=segmentation, output_template=output_template, n_gpus=n_gpus,
verbose=verbose, threshold=threshold, interpolation=interpolation,
segmentation_labels=segmentation_labels,
sum_then_threshold=sum_then_threshold, label_hierarchy=label_hierarchy,
write_input_image=write_input_images)
batch = list()
batch_references = list()
batch_subjects = list()
batch_filenames = list()
def save_predictions(prediction, args, basename, metric_names, surface_names, prediction_dir):
ref_filename = args[2][0]
subject_id = args[-1]
ref_basename = os.path.basename(ref_filename)
prediction_name = "_".join((subject_id, basename, "prediction"))
_metric_names = [_metric_name.format(prediction_name) for _metric_name in np.asarray(metric_names).ravel()]
output_filename = os.path.join(prediction_dir, ref_basename.replace(subject_id, prediction_name))
if prediction_dir is not None and not os.path.exists(output_filename):
ref_cifti = nib.load(ref_filename)
prediction_array = prediction.reshape(len(_metric_names),
np.sum(ref_cifti.header.get_axis(1).surface_mask))
cifti_file = new_cifti_scalar_like(prediction_array, _metric_names, surface_names, ref_cifti)
cifti_file.to_filename(output_filename)
def pytorch_subject_predictions(idx, model, dataset, criterion, basename, prediction_dir, surface_names, metric_names,
n_gpus, reference):
import torch
with torch.no_grad():
args = dataset.filenames[idx]
ref_filename = args[2][0]
subject_id = args[-1]
ref_basename = os.path.basename(ref_filename)
prediction_name = "_".join((subject_id, basename, "prediction"))
_metric_names = [_metric_name.format(prediction_name) for _metric_name in np.asarray(metric_names).ravel()]
output_filename = os.path.join(prediction_dir, ref_basename.replace(subject_id, prediction_name))
x, y = dataset[idx]
if os.path.exists(output_filename):
prediction = torch.from_numpy(get_metric_data([nib.load(output_filename)],
[_metric_names],
surface_names,
subject_id)).float().cpu()
else:
prediction = model(x.unsqueeze(0))
if n_gpus > 0:
prediction = prediction.cpu()
y = y.unsqueeze(0)
score = criterion(prediction.reshape(y.shape), y).item()
row = [subject_id, score]
if reference is not None:
reference_score = criterion(reference.reshape(y.shape), y).item()
row.append(reference_score)
if prediction_dir is not None and not os.path.exists(output_filename):
ref_cifti = nib.load(ref_filename)
prediction_array = prediction.numpy().reshape(len(_metric_names),
np.sum(ref_cifti.header.get_axis(1).surface_mask))
cifti_file = new_cifti_scalar_like(prediction_array, _metric_names, surface_names, ref_cifti)
cifti_file.to_filename(output_filename)
return row
def single_volume_zstat_denoising(model_filename, model_name, n_features, filenames, window, prediction_dir,
n_gpus=1, batch_size=1, model_kwargs=None, n_outputs=None,
sequence_kwargs=None, spacing=None, sequence=None,
strict_model_loading=True, metric_names=None,
verbose=True, resample_predictions=False, **unused_kwargs):
import torch
model, dataset, basename = load_volumetric_model_and_dataset(model_name, model_filename, model_kwargs, n_outputs,
n_features, strict_model_loading, n_gpus, sequence,
sequence_kwargs, filenames, window, spacing,
metric_names)
dataset.extract_sub_volumes = False
print("Dataset: ", len(dataset))
with torch.no_grad():
completed = set()
batch = list()
for idx in range(len(dataset)):
x_filename, subject_id = get_feature_filename_and_subject_id(dataset, idx, verbose=False)
while type(x_filename) == list:
x_filename = x_filename[0]
if x_filename in completed:
continue
if verbose:
print("Reading:", x_filename)
x_image, ref_image = load_images_from_dataset(dataset, idx, resample_predictions)
if len(x_image.shape) == 4:
volumes_per_image = x_image.shape[3]
prediction_data = np.zeros(x_image.shape)
else:
volumes_per_image = 1
prediction_data = np.zeros(x_image.shape + (volumes_per_image,))
data = get_nibabel_data(x_image)
for image_idx in range(volumes_per_image):
batch.append(data[..., image_idx][..., None])
if len(batch) >= batch_size or image_idx == volumes_per_image - 1:
prediction = pytorch_predict_batch_array(model, batch, n_gpus)
prediction = np.moveaxis(prediction, 0, -1).squeeze()
prediction_data[..., (image_idx - prediction.shape[-1] + 1):(image_idx + 1)] = prediction
batch = list()
pred_image = new_img_like(ref_niimg=x_image, data=prediction_data)
output_filename = os.path.join(prediction_dir, "_".join((subject_id,
basename,
os.path.basename(x_filename))))
if verbose:
print("Writing:", output_filename)
pred_image.to_filename(output_filename)
completed.add(x_filename)
def predictions_with_permutations(model_filename, model_name, n_features, filenames, window, prediction_dir=None,
n_gpus=1, batch_size=1, model_kwargs=None, n_outputs=None, sequence_kwargs=None,
spacing=None, sequence=None, strict_model_loading=True, metric_names=None,
verbose=True, resample_predictions=False, interpolation="linear",
output_template=None, segmentation=False, segmentation_labels=None,
sum_then_threshold=True, threshold=0.5, label_hierarchy=None, permutation_weight=None,
**unused_args):
import torch
model, dataset, basename = load_volumetric_model_and_dataset(model_name, model_filename, model_kwargs, n_outputs,
n_features, strict_model_loading, n_gpus, sequence,
sequence_kwargs, filenames, window, spacing,
metric_names)
permutation_keys = list(generate_permutation_keys())
permutation_weights = np.ones((len(permutation_keys), 1, 1, 1, 1))
# TODO: make this work with models that only output one prediction map
if permutation_weight is not None:
non_perm_index = permutation_keys.index(((0, 0), 0, 0, 0, 0))
permutation_weights = permutation_weights * permutation_weight
permutation_weights[non_perm_index] = len(permutation_keys) * (1 - permutation_weight)
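        # With these weights the averaging in predict_with_permutations acts
        # approximately as (1 - permutation_weight) * un-permuted prediction
        # + permutation_weight * mean over the permuted predictions, so
        # permutation_weight controls how much the test-time augmentation is
        # trusted relative to the plain forward pass.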
dataset.extract_sub_volumes = False
print("Dataset: ", len(dataset))
with torch.no_grad():
for idx in range(len(dataset)):
x_filename, subject_id = get_feature_filename_and_subject_id(dataset, idx, verbose=verbose)
x_image, ref_image = load_images_from_dataset(dataset, idx, resample_predictions)
data = get_nibabel_data(x_image)
prediction_data = predict_with_permutations(model, data, n_outputs, batch_size, n_gpus, permutation_keys,
permutation_weights)
pred_image = prediction_to_image(prediction_data.squeeze(),
input_image=x_image,
reference_image=ref_image,
interpolation=interpolation,
segmentation=segmentation,
segmentation_labels=segmentation_labels,
threshold=threshold,
sum_then_threshold=sum_then_threshold,
label_hierarchy=label_hierarchy)
write_prediction_image_to_file(pred_image,
output_template,
subject_id=subject_id,
x_filename=x_filename,
prediction_dir=prediction_dir,
basename=basename,
verbose=verbose)
def predict_with_permutations(model, data, n_outputs, batch_size, n_gpus, permutation_keys, permutation_weights):
import torch
prediction_data = np.zeros((len(permutation_keys),) + data.shape[:3] + (n_outputs,))
batch = list()
permutation_indices = list()
data = np.moveaxis(data, 3, 0)
for permutation_idx, permutation_key in enumerate(permutation_keys):
batch.append(permute_data(data, permutation_key))
permutation_indices.append(permutation_idx)
if len(batch) >= batch_size or permutation_key == permutation_keys[-1]:
batch_prediction = pytorch_predict_batch(torch.tensor(batch).float(), model, n_gpus).numpy()
for batch_idx, perm_idx in enumerate(permutation_indices):
prediction_data[perm_idx] = np.moveaxis(reverse_permute_data(batch_prediction[batch_idx],
permutation_keys[perm_idx]).squeeze(),
0, 3)
batch = list()
permutation_indices = list()
# average over all the permutations
return np.mean(prediction_data * permutation_weights, axis=0)
def predict_super_resolution(model_filename, model_name, n_features, filenames, window, prediction_dir=None,
n_gpus=1, batch_size=1, model_kwargs=None, n_outputs=None, sequence_kwargs=None,
spacing=None, sequence=None, strict_model_loading=True, metric_names=None,
verbose=True, resample_predictions=False, interpolation="linear",
output_template=None, segmentation=False, segmentation_labels=None,
sum_then_threshold=True, threshold=0.5, label_hierarchy=None, **unused_args):
import torch
new_window = list(np.asarray(window) * 2)
model, dataset, basename = load_volumetric_model_and_dataset(model_name, model_filename, model_kwargs, n_outputs,
n_features, strict_model_loading, n_gpus, sequence,
sequence_kwargs, filenames, new_window, spacing,
metric_names)
dataset.extract_sub_volumes = False
print("Dataset: ", len(dataset))
with torch.no_grad():
for idx in range(len(dataset)):
x_filename, subject_id = get_feature_filename_and_subject_id(dataset, idx, verbose=verbose)
x_image, ref_image = load_images_from_dataset(dataset, idx, resample_predictions)
data = get_nibabel_data(x_image)
prediction_data = predict_super_resolution_data(model, data, batch_size, n_gpus)
pred_image = prediction_to_image(prediction_data.squeeze(),
input_image=x_image,
reference_image=ref_image,
interpolation=interpolation,
segmentation=segmentation,
segmentation_labels=segmentation_labels,
threshold=threshold,
sum_then_threshold=sum_then_threshold,
label_hierarchy=label_hierarchy)
write_prediction_image_to_file(pred_image,
output_template,
subject_id=subject_id,
x_filename=x_filename,
prediction_dir=prediction_dir,
basename=basename,
verbose=verbose)
def predict_super_resolution_data(model, data, batch_size, n_gpus):
batch = list()
input_data = break_down_volume_into_half_size_volumes(data)
predicted_data = list()
for i, volume in enumerate(input_data):
batch.append(volume)
if len(batch) >= batch_size or i == (len(input_data) - 1):
batch_prediction = pytorch_predict_batch_array(model=model, batch=batch, n_gpus=n_gpus)
for batch_idx in range(batch_prediction.shape[0]):
predicted_data.append(batch_prediction[batch_idx])
batch = list()
return combine_half_size_volumes(predicted_data)
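# Illustrative call of single_volume_zstat_denoising -- a sketch only: the
# model file, image filenames, window size and output directory below are
# hypothetical and depend on the surrounding project configuration.
#
#   single_volume_zstat_denoising(model_filename="model.pt",
#                                 model_name="resnet",   # assumed model key
#                                 n_features=1,
#                                 filenames=["sub-01_zstat.nii.gz"],
#                                 window=(64, 64, 64),
#                                 prediction_dir="./predictions",
#                                 batch_size=4)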
| mit |
gimli-org/gimli | doc/tutorials/dev/plot_XX_mod_fv_stokes-2d.py | 1 | 8759 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
import sys
import pygimli as pg
import pygimli.solver as solver
from pygimli.meshtools import polyCreateDefaultEdges_
from pygimli.meshtools import createMesh
import matplotlib.pyplot as plt
import numpy as np
#from solverFVM import solveStokes_NEEDNAME
def modelCavity0(maxArea=0.0025):
mesh = pg.createGrid(x=np.linspace(.0, 1.0, 17),
y=np.linspace(.0, 1.0, 17))
mesh = pg.meshtools.refineQuad2Tri(mesh, style=2)
velBoundary=[[1,[0.0, 0.0]],
[2,[0.0, 0.0]],
[3,[1.0, 0.0]],
[4,[0.0, 0.0]],
]
c = mesh.findCell((0.0, 0.0))
for b in range(c.boundaryCount()):
if c.boundary(b).marker()==4:
c.boundary(b).setMarker(7)
preBoundary=[[7, 0.0],]
a = pg.Vector(mesh.cellCount(), 1.0)
return mesh, velBoundary, preBoundary, a, 100
def modelCavity1(maxArea=0.0025):
boundary = []
boundary.append([-1.0, -1.0])
boundary.append([ -0.5, -1.0])
boundary.append([ -0.5, -0.7])
boundary.append([ 0.5, -0.7])
boundary.append([ 0.5, -1.0])
boundary.append([ 1.0, -1.0])
boundary.append([ 1.0, 1.0])
boundary.append([-1.0, 1.0])
poly = pg.Mesh(2)
nodes = [poly.createNode(b) for b in boundary]
polyCreateDefaultEdges_(poly, boundaryMarker=[4,4,4,4,4,2,3,1])
mesh = createMesh(poly, quality=33.4, area=maxArea, smooth=[0,10])
    # Diffusion coefficient, viscosity
    # pressure reference: pick the marker-1 boundary with the lowest y-center
    b7 = mesh.findBoundaryByMarker(1)[0]
    for b in mesh.findBoundaryByMarker(1):
        if b.center()[1] < b7.center()[1]:
            b7 = b
b7.setMarker(7)
velBoundary=[ [1,[0.0, 0.0]],
[2,[0.0, 0.0]],
[3,[1.0, 0.0]],
[4,[0.0, 0.0]],
[7,[0.0, 0.0]]]
preBoundary=[[7,0.0]]
a = pg.Vector(mesh.cellCount(), 10000.0)
return mesh, velBoundary, preBoundary, a, 100
def modelCavity2(area, refine=True):
boundary = []
boundary.append([-1.0, 0.0]) #0
boundary.append([-1.0, -1.0]) #1
boundary.append([-0.2, -1.0]) #2
boundary.append([-0.2, -0.8]) #3
boundary.append([ 0.2, -0.8]) #4
boundary.append([ 0.2, -1.0]) #5
boundary.append([ 1.0, -1.0]) #6
boundary.append([ 1.0, 0.0]) #7
boundary.append([ 0.2, 0.0]) #8
boundary.append([ 0.2, -0.2]) #9
boundary.append([-0.2, -0.2]) #10
boundary.append([-0.2, 0.0]) #11
poly = pg.Mesh(2)
nodes = [poly.createNode(b) for b in boundary]
eMarker = [1, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2]
for i in range(len(nodes)):
poly.createEdge(nodes[i], nodes[(i+1)%len(nodes)], eMarker[i])
if refine:
poly.createNode([-0.21, -0.99])
mesh = createMesh(poly, quality=34.0, area=area, smooth=[0,10])
    # Diffusion coefficient, viscosity
    # pressure reference: pick the marker-1 boundary with the lowest y-center
    b7 = mesh.findBoundaryByMarker(2)[0]
    for b in mesh.findBoundaryByMarker(1):
        if b.center()[1] < b7.center()[1]:
            b7 = b
b7.setMarker(4)
velBoundary=[[1,[1.0, 0.0]],
[2,[0.0, 0.0]],
[3,[1.0, 0.0]],
[4,[0.0, 0.0]],
]
preBoundary=[[4,0.0]]
a = pg.Vector(mesh.cellCount(), 1.0)
return mesh, velBoundary, preBoundary, a, 50000
def modelPipe():
boundary = []
#left inflow
boundary.append([0.1, 0.0]) # 0
boundary.append([0.0, 0.0]) # 1
boundary.append([0.0, -1.0])
boundary.append([0.0, -1.1])
boundary.append([1.0, -1.1])
boundary.append([1.0, -1.0])
#right outflow
boundary.append([1.0, 0.0]) # 1
boundary.append([0.9, 0.0]) # 0
#closing
boundary.append([0.9, -1.0]) # 0
boundary.append([0.1, -1.0]) # 0
poly = pg.Mesh(2)
nodes = [poly.createNode(b) for b in boundary]
for i in range(len(nodes)): poly.createEdge(nodes[i], nodes[(i+1)%len(nodes)], 1)
poly.boundaries()[0].setMarker(2)
for b in poly.boundaries():
if b.norm()[1]==1.0 and b.center()[0]==0.95:
b.setMarker(3)
if b.norm()[1]==-1.0 and b.center()[0]==0.5:
b.setMarker(4)
mesh = createMesh(poly, quality=34.0, area=0.0005, smooth=[0,10])
velBoundary=[[1, [0.0, 0.0]],
[2, [0.0, -1.0]],
[3, [0.0, 1.0]],
[4, [0.0, 0.0]]
]
#preBoundary=None
preBoundary=[[4, 0.0]]
a=1
return mesh, velBoundary, preBoundary, a, 400
def modelPlume(maxArea=0.1):
boundary = []
boundary.append([-1., 0.0])#0
boundary.append([-1., -1.0])#1
boundary.append([-0.1, -1.0])#2
boundary.append([ 0.1, -1.0])#3
boundary.append([ 1., -1.0])#4
boundary.append([ 1., 0.0])#5
boundary.append([ 0.1, 0.0])#6
boundary.append([ -0.1, 0.0])#7
poly = pg.Mesh(2)
nodes = [poly.createNode(b) for b in boundary]
polyCreateDefaultEdges_(poly, boundaryMarker=[1,2,3,2,4,5,6,7])
#poly.createEdge(nodes[0], nodes[1], 1) # left
#poly.createEdge(nodes[1], nodes[2], 2) # bottom1
#poly.createEdge(nodes[2], nodes[3], 3) # bottom2
#poly.createEdge(nodes[3], nodes[4], 2) # bottom3
#poly.createEdge(nodes[4], nodes[5], 4) # right
#poly.createEdge(nodes[5], nodes[6], 5) # top1
#poly.createEdge(nodes[6], nodes[7], 6) # topcenter
#poly.createEdge(nodes[7], nodes[0], 7) # top2
mesh = createMesh(poly, quality=33.4, area=maxArea,
smooth=[0,10], verbose=False)
velBoundary=[#[1, [0.0, 0.0]],
# [2, [0.0, 0.0]],
#[3, [0.0, 0.0]],
#[4, [0.0, 0.0]],
[5, [1.0, 0.0]],
[6, [0.0, 0.0]],
[7, [-1.0, 0.0]]
]
b = mesh.findBoundaryByMarker(2)[0]
b.setMarker(8)
#preBoundary=None
preBoundary=[[8, 0.0]]
a=1
return mesh, velBoundary, preBoundary, a, 100
modelBuilder = modelCavity0
#modelBuilder = modelCavity1
#modelBuilder = modelCavity2
#modelBuilder = modelPipe
#modelBuilder = modelPlume
swatchG = pg.core.Stopwatch(True)
swatch = pg.core.Stopwatch(True)
nSteps = 1
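# Coarse-to-fine schedule of target cell areas: for nSteps > 1 the reversed
# log-spaced vector runs from the largest maximum cell area (coarsest mesh)
# down to the smallest (finest mesh), one Stokes solve per refinement level.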
multigridArea = (10.**(np.linspace(np.log10(0.001), np.log10(0.1), nSteps)))[::-1]
print(multigridArea)
pre = None
vel = None
for i in range(0, len(multigridArea)):
if i == 0:
mesh, velBoundary, preBoundary, a, maxIter = modelBuilder(multigridArea[0])
pre = np.zeros(mesh.cellCount())
vel = np.zeros((mesh.cellCount(), 3))
else:
#mesh1 = mesh.createH2()
mesh1, velBoundary, preBoundary, a, maxIter = modelBuilder(multigridArea[i])
a = pg.Vector(mesh1.cellCount(), 1.0)
pre = pg.interpolate(mesh, pre, mesh1.cellCenter())
vx0 = pg.interpolate(mesh, vel[:,0], mesh1.cellCenter())
vy0 = pg.interpolate(mesh, vel[:,1], mesh1.cellCenter())
vel = np.vstack([vx0, vy0]).T
mesh = mesh1
print("Cells: ", mesh.cellCount(), multigridArea[i])
vel, pre, pCNorm, divVNorm = pg.solver.solveStokes(mesh,
viscosity=a,
velBoundary=velBoundary,
preBoundary=preBoundary,
pre0=pre,
vel0=vel,
maxIter=maxIter,
tol=1e-2,
verbose=1)
print(" Time: ", swatch.duration(True))
print("OverallTime:", swatchG.duration(True))
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax, cbar = pg.show(mesh,
data=pg.core.cellDataToPointData(mesh, pre),
logScale=False, colorBar=True, axes=ax1)
cbar.ax.set_xlabel('Pressure in ??')
meshC, velBoundary, preBoundary, a, maxIter = modelBuilder(0.01)
pg.show(mesh, data=vel, coarseMesh=meshC, axes=ax1)
pg.show(mesh, axes=ax1)
ax2 = fig.add_subplot(1, 2, 2)
ax, cbar = pg.show(mesh,
data=pg.core.cellDataToPointData(mesh,
np.sqrt(vel[:,0]*vel[:,0] +vel[:,1]*vel[:,1])),
logScale=False, colorBar=True, axes=ax2)
cbar.ax.set_xlabel('Velocity in m$/$s')
pg.show(mesh, data=vel, coarseMesh=meshC, axes=ax2)
pg.show(mesh, axes=ax2)
plt.figure()
plt.semilogy(pCNorm, label='norm')
plt.semilogy(divVNorm, label='norm div v')
plt.legend()
pg.wait()
#drawMesh(ax, mesh) | apache-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/matplotlib/testing/jpl_units/UnitDblFormatter.py | 23 | 1485 | #===========================================================================
#
# UnitDblFormatter
#
#===========================================================================
"""UnitDblFormatter module containing class UnitDblFormatter."""
#===========================================================================
# Place all imports after here.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.ticker as ticker
#
# Place all imports before here.
#===========================================================================
__all__ = [ 'UnitDblFormatter' ]
#===========================================================================
class UnitDblFormatter( ticker.ScalarFormatter ):
"""The formatter for UnitDbl data types. This allows for formatting
with the unit string.
"""
def __init__( self, *args, **kwargs ):
'The arguments are identical to matplotlib.ticker.ScalarFormatter.'
ticker.ScalarFormatter.__init__( self, *args, **kwargs )
def __call__( self, x, pos = None ):
'Return the format for tick val x at position pos'
if len(self.locs) == 0:
return ''
else:
return str(x)
def format_data_short( self, value ):
"Return the value formatted in 'short' format."
return str(value)
def format_data( self, value ):
"Return the value formatted into a string."
return str(value)
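#===========================================================================
# Illustrative usage sketch (assumes an existing matplotlib Axes ``ax``):
#
#   ax.xaxis.set_major_formatter( UnitDblFormatter( useOffset = False ) )
#
# so tick labels are rendered through str() of the plotted UnitDbl values.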
| mit |
ilayn/scipy | scipy/cluster/tests/test_hierarchy.py | 12 | 42543 | #
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_, assert_warns
import pytest
from pytest import raises as assert_raises
import scipy.cluster.hierarchy
from scipy.cluster.hierarchy import (
ClusterWarning, linkage, from_mlab_linkage, to_mlab_linkage,
num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster,
is_isomorphic, single, leaders,
correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram,
set_link_color_palette, cut_tree, optimal_leaf_ordering,
_order_cluster_tree, _hierarchy, _LINKAGE_METHODS)
from scipy.spatial.distance import pdist
from scipy.cluster._hierarchy import Heap
from . import hierarchy_test_data
# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
# check if it's available
try:
import matplotlib # type: ignore[import]
# and set the backend to be Agg (no gui)
matplotlib.use('Agg')
# before importing pyplot
import matplotlib.pyplot as plt # type: ignore[import]
have_matplotlib = True
except Exception:
have_matplotlib = False
class TestLinkage:
def test_linkage_non_finite_elements_in_distance_matrix(self):
# Tests linkage(Y) where Y contains a non-finite element (e.g. NaN or Inf).
# Exception expected.
y = np.zeros((6,))
y[0] = np.nan
assert_raises(ValueError, linkage, y)
def test_linkage_empty_distance_matrix(self):
# Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected.
y = np.zeros((0,))
assert_raises(ValueError, linkage, y)
def test_linkage_tdist(self):
for method in ['single', 'complete', 'average', 'weighted']:
self.check_linkage_tdist(method)
def check_linkage_tdist(self, method):
# Tests linkage(Y, method) on the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
assert_allclose(Z, expectedZ, atol=1e-10)
def test_linkage_X(self):
for method in ['centroid', 'median', 'ward']:
self.check_linkage_q(method)
def check_linkage_q(self, method):
# Tests linkage(Y, method) on the Q data set.
Z = linkage(hierarchy_test_data.X, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method)
assert_allclose(Z, expectedZ, atol=1e-06)
y = scipy.spatial.distance.pdist(hierarchy_test_data.X,
metric="euclidean")
Z = linkage(y, method)
assert_allclose(Z, expectedZ, atol=1e-06)
def test_compare_with_trivial(self):
rng = np.random.RandomState(0)
n = 20
X = rng.rand(n, 2)
d = pdist(X)
for method, code in _LINKAGE_METHODS.items():
Z_trivial = _hierarchy.linkage(d, n, code)
Z = linkage(d, method)
assert_allclose(Z_trivial, Z, rtol=1e-14, atol=1e-15)
def test_optimal_leaf_ordering(self):
Z = linkage(hierarchy_test_data.ytdist, optimal_ordering=True)
expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_single_olo')
assert_allclose(Z, expectedZ, atol=1e-10)
class TestLinkageTies:
_expectations = {
'single': np.array([[0, 1, 1.41421356, 2],
[2, 3, 1.41421356, 3]]),
'complete': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.82842712, 3]]),
'average': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.12132034, 3]]),
'weighted': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.12132034, 3]]),
'centroid': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.12132034, 3]]),
'median': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.12132034, 3]]),
'ward': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.44948974, 3]]),
}
def test_linkage_ties(self):
for method in ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']:
self.check_linkage_ties(method)
def check_linkage_ties(self, method):
X = np.array([[-1, -1], [0, 0], [1, 1]])
Z = linkage(X, method=method)
expectedZ = self._expectations[method]
assert_allclose(Z, expectedZ, atol=1e-06)
class TestInconsistent:
def test_inconsistent_tdist(self):
for depth in hierarchy_test_data.inconsistent_ytdist:
self.check_inconsistent_tdist(depth)
def check_inconsistent_tdist(self, depth):
Z = hierarchy_test_data.linkage_ytdist_single
assert_allclose(inconsistent(Z, depth),
hierarchy_test_data.inconsistent_ytdist[depth])
class TestCopheneticDistance:
def test_linkage_cophenet_tdist_Z(self):
# Tests cophenet(Z) on tdist data set.
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
Z = hierarchy_test_data.linkage_ytdist_single
M = cophenet(Z)
assert_allclose(M, expectedM, atol=1e-10)
def test_linkage_cophenet_tdist_Z_Y(self):
# Tests cophenet(Z, Y) on tdist data set.
Z = hierarchy_test_data.linkage_ytdist_single
(c, M) = cophenet(Z, hierarchy_test_data.ytdist)
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
expectedc = 0.639931296433393415057366837573
assert_allclose(c, expectedc, atol=1e-10)
assert_allclose(M, expectedM, atol=1e-10)
class TestMLabLinkageConversion:
def test_mlab_linkage_conversion_empty(self):
# Tests from/to_mlab_linkage on empty linkage array.
X = np.asarray([])
assert_equal(from_mlab_linkage([]), X)
assert_equal(to_mlab_linkage([]), X)
def test_mlab_linkage_conversion_single_row(self):
# Tests from/to_mlab_linkage on linkage array with single row.
Z = np.asarray([[0., 1., 3., 2.]])
Zm = [[1, 2, 3]]
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
def test_mlab_linkage_conversion_multiple_rows(self):
# Tests from/to_mlab_linkage on linkage array with multiple rows.
Zm = np.asarray([[3, 6, 138], [4, 5, 219],
[1, 8, 255], [2, 9, 268], [7, 10, 295]])
Z = np.array([[2., 5., 138., 2.],
[3., 4., 219., 2.],
[0., 7., 255., 3.],
[1., 8., 268., 4.],
[6., 9., 295., 6.]],
dtype=np.double)
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
class TestFcluster:
def test_fclusterdata(self):
for t in hierarchy_test_data.fcluster_inconsistent:
self.check_fclusterdata(t, 'inconsistent')
for t in hierarchy_test_data.fcluster_distance:
self.check_fclusterdata(t, 'distance')
for t in hierarchy_test_data.fcluster_maxclust:
self.check_fclusterdata(t, 'maxclust')
def check_fclusterdata(self, t, criterion):
# Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
X = hierarchy_test_data.Q_X
T = fclusterdata(X, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster(self):
for t in hierarchy_test_data.fcluster_inconsistent:
self.check_fcluster(t, 'inconsistent')
for t in hierarchy_test_data.fcluster_distance:
self.check_fcluster(t, 'distance')
for t in hierarchy_test_data.fcluster_maxclust:
self.check_fcluster(t, 'maxclust')
def check_fcluster(self, t, criterion):
# Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster_monocrit(self):
for t in hierarchy_test_data.fcluster_distance:
self.check_fcluster_monocrit(t)
for t in hierarchy_test_data.fcluster_maxclust:
self.check_fcluster_maxclust_monocrit(t)
def check_fcluster_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_distance[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
def check_fcluster_maxclust_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_maxclust[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
class TestLeaders:
def test_leaders_single(self):
# Tests leaders using a flat clustering generated by single linkage.
X = hierarchy_test_data.Q_X
Y = pdist(X)
Z = linkage(Y)
T = fcluster(Z, criterion='maxclust', t=3)
Lright = (np.array([53, 55, 56]), np.array([2, 3, 1]))
L = leaders(Z, T)
assert_equal(L, Lright)
class TestIsIsomorphic:
def test_is_isomorphic_1(self):
# Tests is_isomorphic on test case #1 (one flat cluster, different labellings)
a = [1, 1, 1]
b = [2, 2, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_2(self):
# Tests is_isomorphic on test case #2 (two flat clusters, different labelings)
a = [1, 7, 1]
b = [2, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_3(self):
# Tests is_isomorphic on test case #3 (no flat clusters)
a = []
b = []
assert_(is_isomorphic(a, b))
def test_is_isomorphic_4A(self):
# Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic)
a = [1, 2, 3]
b = [1, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_4B(self):
# Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic)
a = [1, 2, 3, 3]
b = [1, 3, 2, 3]
assert_(is_isomorphic(a, b) == False)
assert_(is_isomorphic(b, a) == False)
def test_is_isomorphic_4C(self):
# Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic)
a = [7, 2, 3]
b = [6, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_5(self):
# Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling).
for nc in [2, 3, 5]:
self.help_is_isomorphic_randperm(1000, nc)
def test_is_isomorphic_6(self):
# Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling, slightly
# nonisomorphic.)
for nc in [2, 3, 5]:
self.help_is_isomorphic_randperm(1000, nc, True, 5)
def test_is_isomorphic_7(self):
# Regression test for gh-6271
assert_(not is_isomorphic([1, 2, 3], [1, 1, 1]))
def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0):
for k in range(3):
a = np.int_(np.random.rand(nobs) * nclusters)
b = np.zeros(a.size, dtype=np.int_)
P = np.random.permutation(nclusters)
for i in range(0, a.shape[0]):
b[i] = P[a[i]]
if noniso:
Q = np.random.permutation(nobs)
b[Q[0:nerrors]] += 1
b[Q[0:nerrors]] %= nclusters
assert_(is_isomorphic(a, b) == (not noniso))
assert_(is_isomorphic(b, a) == (not noniso))
class TestIsValidLinkage:
def test_is_valid_linkage_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
self.check_is_valid_linkage_various_size(nrow, ncol, valid)
def check_is_valid_linkage_various_size(self, nrow, ncol, valid):
# Tests is_valid_linkage(Z) with linkage matrics of various sizes
Z = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
Z = Z[:nrow, :ncol]
assert_(is_valid_linkage(Z) == valid)
if not valid:
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_int_type(self):
# Tests is_valid_linkage(Z) with integer type.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=int)
assert_(is_valid_linkage(Z) == False)
assert_raises(TypeError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_empty(self):
# Tests is_valid_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(is_valid_linkage(Z) == True)
def test_is_valid_linkage_4_and_up_neg_index_left(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (left).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,0] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_index_right(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (right).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,1] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_dist(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative distances.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,2] = -0.5
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_counts(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative counts.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,3] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
class TestIsValidInconsistent:
def test_is_valid_im_int_type(self):
# Tests is_valid_im(R) with integer type.
R = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=int)
assert_(is_valid_im(R) == False)
assert_raises(TypeError, is_valid_im, R, throw=True)
def test_is_valid_im_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
self.check_is_valid_im_various_size(nrow, ncol, valid)
def check_is_valid_im_various_size(self, nrow, ncol, valid):
# Tests is_valid_im(R) with linkage matrics of various sizes
R = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
R = R[:nrow, :ncol]
assert_(is_valid_im(R) == valid)
if not valid:
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_empty(self):
# Tests is_valid_im(R) with empty inconsistency matrix.
R = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
assert_(is_valid_im(R) == True)
def test_is_valid_im_4_and_up_neg_index_left(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height means.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,0] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_index_right(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height standard deviations.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,1] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_dist(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link counts.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,2] = -0.5
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
class TestNumObsLinkage:
def test_num_obs_linkage_empty(self):
# Tests num_obs_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, num_obs_linkage, Z)
def test_num_obs_linkage_1x4(self):
# Tests num_obs_linkage(Z) on linkage over 2 observations.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
assert_equal(num_obs_linkage(Z), 2)
def test_num_obs_linkage_2x4(self):
# Tests num_obs_linkage(Z) on linkage over 3 observations.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
assert_equal(num_obs_linkage(Z), 3)
def test_num_obs_linkage_4_and_up(self):
# Tests num_obs_linkage(Z) on linkage on observation sets between sizes
# 4 and 15 (step size 3).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_equal(num_obs_linkage(Z), i)
class TestLeavesList:
def test_leaves_list_1x4(self):
# Tests leaves_list(Z) on a 1x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1])
def test_leaves_list_2x4(self):
# Tests leaves_list(Z) on a 2x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1, 2])
def test_leaves_list_Q(self):
for method in ['single', 'complete', 'average', 'weighted', 'centroid',
'median', 'ward']:
self.check_leaves_list_Q(method)
def check_leaves_list_Q(self, method):
# Tests leaves_list(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
node = to_tree(Z)
assert_equal(node.pre_order(), leaves_list(Z))
def test_Q_subtree_pre_order(self):
# Tests that pre_order() works when called on sub-trees.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
node = to_tree(Z)
assert_equal(node.pre_order(), (node.get_left().pre_order()
+ node.get_right().pre_order()))
class TestCorrespond:
def test_correspond_empty(self):
# Tests correspond(Z, y) with empty linkage and condensed distance matrix.
y = np.zeros((0,))
Z = np.zeros((0,4))
assert_raises(ValueError, correspond, Z, y)
def test_correspond_2_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes.
for i in range(2, 4):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(correspond(Z, y))
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(correspond(Z, y))
def test_correspond_4_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) +
list(zip(list(range(3, 5)), list(range(2, 4))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
assert_equal(correspond(Z, y2), False)
assert_equal(correspond(Z2, y), False)
def test_correspond_4_and_up_2(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
list(zip(list(range(2, 7)), list(range(16, 21))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
assert_equal(correspond(Z, y2), False)
assert_equal(correspond(Z2, y), False)
def test_num_obs_linkage_multi_matrix(self):
# Tests num_obs_linkage with observation matrices of multiple sizes.
for n in range(2, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
Z = linkage(Y)
assert_equal(num_obs_linkage(Z), n)
class TestIsMonotonic:
def test_is_monotonic_empty(self):
# Tests is_monotonic(Z) on an empty linkage.
Z = np.zeros((0, 4))
assert_raises(ValueError, is_monotonic, Z)
def test_is_monotonic_1x4(self):
# Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_2x4_T(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 3]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_2x4_F(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
Z = np.asarray([[0, 1, 0.4, 2],
[2, 3, 0.3, 3]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_T(self):
# Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_3x4_F1(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.2, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_F2(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
Z = np.asarray([[0, 1, 0.8, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_F3(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.2, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_tdist_linkage1(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Expecting True.
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_tdist_linkage2(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Perturbing. Expecting False.
Z = linkage(hierarchy_test_data.ytdist, 'single')
Z[2,2] = 0.0
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_Q_linkage(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# Q data set. Expecting True.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
assert_equal(is_monotonic(Z), True)
class TestMaxDists:
def test_maxdists_empty_linkage(self):
# Tests maxdists(Z) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxdists, Z)
def test_maxdists_one_cluster_linkage(self):
# Tests maxdists(Z) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxdists_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
self.check_maxdists_Q_linkage(method)
def check_maxdists_Q_linkage(self, method):
# Tests maxdists(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxInconsts:
def test_maxinconsts_empty_linkage(self):
# Tests maxinconsts(Z, R) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_difrow_linkage(self):
# Tests maxinconsts(Z, R) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_one_cluster_linkage(self):
# Tests maxinconsts(Z, R) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxinconsts_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
self.check_maxinconsts_Q_linkage(method)
def check_maxinconsts_Q_linkage(self, method):
# Tests maxinconsts(Z, R) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxRStat:
def test_maxRstat_invalid_index(self):
for i in [3.3, -1, 4]:
self.check_maxRstat_invalid_index(i)
def check_maxRstat_invalid_index(self, i):
# Tests maxRstat(Z, R, i). Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
if isinstance(i, int):
assert_raises(ValueError, maxRstat, Z, R, i)
else:
assert_raises(TypeError, maxRstat, Z, R, i)
def test_maxRstat_empty_linkage(self):
for i in range(4):
self.check_maxRstat_empty_linkage(i)
def check_maxRstat_empty_linkage(self, i):
# Tests maxRstat(Z, R, i) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_difrow_linkage(self):
for i in range(4):
self.check_maxRstat_difrow_linkage(i)
def check_maxRstat_difrow_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_one_cluster_linkage(self):
for i in range(4):
self.check_maxRstat_one_cluster_linkage(i)
def check_maxRstat_one_cluster_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
        MD = maxRstat(Z, R, i)
        expectedMD = calculate_maximum_inconsistencies(Z, R, i)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxRstat_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
for i in range(4):
self.check_maxRstat_Q_linkage(method, i)
def check_maxRstat_Q_linkage(self, method, i):
# Tests maxRstat(Z, R, i) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
        MD = maxRstat(Z, R, i)
        expectedMD = calculate_maximum_inconsistencies(Z, R, i)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestDendrogram:
def test_dendrogram_single_linkage_tdist(self):
# Tests dendrogram calculation on single linkage of the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, no_plot=True)
leaves = R["leaves"]
assert_equal(leaves, [2, 5, 1, 0, 3, 4])
def test_valid_orientation(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_raises(ValueError, dendrogram, Z, orientation="foo")
def test_labels_as_array_or_list(self):
# test for gh-12418
Z = linkage(hierarchy_test_data.ytdist, 'single')
labels = np.array([1, 3, 2, 6, 4, 5])
result1 = dendrogram(Z, labels=labels, no_plot=True)
result2 = dendrogram(Z, labels=labels.tolist(), no_plot=True)
assert result1 == result2
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_valid_label_size(self):
link = np.array([
[0, 1, 1.0, 4],
[2, 3, 1.0, 5],
[4, 5, 2.0, 6],
])
plt.figure()
with pytest.raises(ValueError) as exc_info:
dendrogram(link, labels=list(range(100)))
assert "Dimensions of Z and labels must be consistent."\
in str(exc_info.value)
with pytest.raises(
ValueError,
match="Dimensions of Z and labels must be consistent."):
dendrogram(link, labels=[])
plt.close()
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_dendrogram_plot(self):
for orientation in ['top', 'bottom', 'left', 'right']:
self.check_dendrogram_plot(orientation)
def check_dendrogram_plot(self, orientation):
# Tests dendrogram plotting.
Z = linkage(hierarchy_test_data.ytdist, 'single')
expected = {'color_list': ['C1', 'C0', 'C0', 'C0', 'C0'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 219.0, 219.0, 0.0],
[0.0, 255.0, 255.0, 219.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[45.0, 45.0, 55.0, 55.0],
[35.0, 35.0, 50.0, 50.0],
[25.0, 25.0, 42.5, 42.5],
[10.0, 10.0, 33.75, 33.75]],
'ivl': ['2', '5', '1', '0', '3', '4'],
'leaves': [2, 5, 1, 0, 3, 4],
'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0', 'C0'],
}
fig = plt.figure()
ax = fig.add_subplot(221)
# test that dendrogram accepts ax keyword
R1 = dendrogram(Z, ax=ax, orientation=orientation)
assert_equal(R1, expected)
# test that dendrogram accepts and handle the leaf_font_size and
# leaf_rotation keywords
dendrogram(Z, ax=ax, orientation=orientation,
leaf_font_size=20, leaf_rotation=90)
testlabel = (
ax.get_xticklabels()[0]
if orientation in ['top', 'bottom']
else ax.get_yticklabels()[0]
)
assert_equal(testlabel.get_rotation(), 90)
assert_equal(testlabel.get_size(), 20)
dendrogram(Z, ax=ax, orientation=orientation,
leaf_rotation=90)
testlabel = (
ax.get_xticklabels()[0]
if orientation in ['top', 'bottom']
else ax.get_yticklabels()[0]
)
assert_equal(testlabel.get_rotation(), 90)
dendrogram(Z, ax=ax, orientation=orientation,
leaf_font_size=20)
testlabel = (
ax.get_xticklabels()[0]
if orientation in ['top', 'bottom']
else ax.get_yticklabels()[0]
)
assert_equal(testlabel.get_size(), 20)
plt.close()
# test plotting to gca (will import pylab)
R2 = dendrogram(Z, orientation=orientation)
plt.close()
assert_equal(R2, expected)
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_dendrogram_truncate_mode(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, 2, 'lastp', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['C0'],
'dcoord': [[0.0, 295.0, 295.0, 0.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0]],
'ivl': ['(2)', '(4)'],
'leaves': [6, 9],
'leaves_color_list': ['C0', 'C0'],
})
R = dendrogram(Z, 2, 'mtica', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['C1', 'C0', 'C0', 'C0'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 255.0, 255.0, 0.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[35.0, 35.0, 45.0, 45.0],
[25.0, 25.0, 40.0, 40.0],
[10.0, 10.0, 32.5, 32.5]],
'ivl': ['2', '5', '1', '0', '(2)'],
'leaves': [2, 5, 1, 0, 7],
'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0'],
})
def test_dendrogram_colors(self):
# Tests dendrogram plots with alternate colors
Z = linkage(hierarchy_test_data.ytdist, 'single')
set_link_color_palette(['c', 'm', 'y', 'k'])
R = dendrogram(Z, no_plot=True,
above_threshold_color='g', color_threshold=250)
set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])
color_list = R['color_list']
assert_equal(color_list, ['c', 'm', 'g', 'g', 'g'])
# reset color palette (global list)
set_link_color_palette(None)
def calculate_maximum_distances(Z):
# Used for testing correctness of maxdists.
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in range(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = Z[i, 2]
B[i] = q.max()
return B
def calculate_maximum_inconsistencies(Z, R, k=3):
# Used for testing correctness of maxinconsts.
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in range(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = R[i, k]
B[i] = q.max()
return B
def within_tol(a, b, tol):
return np.abs(a - b).max() < tol
def test_unsupported_uncondensed_distance_matrix_linkage_warning():
assert_warns(ClusterWarning, linkage, [[0, 1], [1, 0]])
def test_euclidean_linkage_value_error():
for method in scipy.cluster.hierarchy._EUCLIDEAN_METHODS:
assert_raises(ValueError, linkage, [[1, 1], [1, 1]],
method=method, metric='cityblock')
def test_2x2_linkage():
Z1 = linkage([1], method='single', metric='euclidean')
Z2 = linkage([[0, 1], [0, 0]], method='single', metric='euclidean')
assert_allclose(Z1, Z2)
def test_node_compare():
np.random.seed(23)
nobs = 50
X = np.random.randn(nobs, 4)
Z = scipy.cluster.hierarchy.ward(X)
tree = to_tree(Z)
assert_(tree > tree.get_left())
assert_(tree.get_right() > tree.get_left())
assert_(tree.get_right() == tree.get_right())
assert_(tree.get_right() != tree.get_left())
def test_cut_tree():
np.random.seed(23)
nobs = 50
X = np.random.randn(nobs, 4)
Z = scipy.cluster.hierarchy.ward(X)
cutree = cut_tree(Z)
assert_equal(cutree[:, 0], np.arange(nobs))
assert_equal(cutree[:, -1], np.zeros(nobs))
assert_equal(cutree.max(0), np.arange(nobs - 1, -1, -1))
assert_equal(cutree[:, [-5]], cut_tree(Z, n_clusters=5))
assert_equal(cutree[:, [-5, -10]], cut_tree(Z, n_clusters=[5, 10]))
assert_equal(cutree[:, [-10, -5]], cut_tree(Z, n_clusters=[10, 5]))
nodes = _order_cluster_tree(Z)
heights = np.array([node.dist for node in nodes])
assert_equal(cutree[:, np.searchsorted(heights, [5])],
cut_tree(Z, height=5))
assert_equal(cutree[:, np.searchsorted(heights, [5, 10])],
cut_tree(Z, height=[5, 10]))
assert_equal(cutree[:, np.searchsorted(heights, [10, 5])],
cut_tree(Z, height=[10, 5]))
def test_optimal_leaf_ordering():
# test with the distance vector y
Z = optimal_leaf_ordering(linkage(hierarchy_test_data.ytdist),
hierarchy_test_data.ytdist)
expectedZ = hierarchy_test_data.linkage_ytdist_single_olo
assert_allclose(Z, expectedZ, atol=1e-10)
# test with the observation matrix X
Z = optimal_leaf_ordering(linkage(hierarchy_test_data.X, 'ward'),
hierarchy_test_data.X)
expectedZ = hierarchy_test_data.linkage_X_ward_olo
assert_allclose(Z, expectedZ, atol=1e-06)
def test_Heap():
values = np.array([2, -1, 0, -1.5, 3])
heap = Heap(values)
pair = heap.get_min()
assert_equal(pair['key'], 3)
assert_equal(pair['value'], -1.5)
heap.remove_min()
pair = heap.get_min()
assert_equal(pair['key'], 1)
assert_equal(pair['value'], -1)
heap.change_value(1, 2.5)
pair = heap.get_min()
assert_equal(pair['key'], 2)
assert_equal(pair['value'], 0)
heap.remove_min()
heap.remove_min()
heap.change_value(1, 10)
pair = heap.get_min()
assert_equal(pair['key'], 4)
assert_equal(pair['value'], 3)
heap.remove_min()
pair = heap.get_min()
assert_equal(pair['key'], 1)
assert_equal(pair['value'], 10)
| bsd-3-clause |
sammosummo/sammosummo.github.io | assets/scripts/neals-funnel-c.py | 1 | 1700 | """Generate data and sample from Neal's funnel distribution.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
import pymc3 as pm
from scipy.stats import norm
def main():
with pm.Model():
# set up figure
fs = rcParams["figure.figsize"]
rcParams["figure.figsize"] = (fs[0], fs[0] / 2)
rcParams["lines.linewidth"] = 2
rcParams["font.size"] = 14
# simulate data
np.random.seed(0)
k = 9
n = 10000
v = norm.rvs(0, 3, n)
x = norm.rvs(0, np.exp(v / 2), (k, n))
# set up model
v_ = pm.Normal("v", mu=0, sd=3)
xt_ = pm.Normal("xt", mu=0, sd=1, shape=k)
x_ = pm.Deterministic("x", pm.math.exp(v_ / 2) * xt_)
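        # non-centered parametrization: sample a standard normal xt_ and
        # scale it by exp(v_ / 2), so the funnel-shaped x is a deterministic
        # transform; this is far easier for the sampler than drawing
        # x ~ Normal(0, exp(v / 2)) directly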
# sample and save samples
trace = pm.sample(n, chains=1)
v_samples = trace["v"][:]
xt_samples = trace["xt"][:].T
x_samples = trace["x"][:].T
        # plot the MCMC samples of v against the first coordinate of x
fig, axes = plt.subplots(1, 2, constrained_layout=True)
ax = axes[0]
ax.scatter(
xt_samples[0], v_samples, marker=".", alpha=0.05, rasterized=True, color="r"
)
ax.set_xlim(-3.5, 3.5)
ax.set_ylim(-9, 9)
ax.set_xlabel(r"$\tilde{x}_0$")
ax.set_ylabel("$v$")
ax = axes[1]
ax.scatter(
x_samples[0], v_samples, marker=".", alpha=0.05, rasterized=True, color="r"
)
ax.set_xlabel("$x_0$")
ax.set_xlim(-20, 20)
ax.set_ylim(-9, 9)
# save
plt.savefig("../images/neals-funnel-c.svg", bbox_inches=0, transparent=True)
if __name__ == "__main__":
main()
| mit |
henridwyer/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 221 | 2398 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
    chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
    Xsp = csr_matrix(X, dtype=np.float64)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |