repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---|
PatrickChrist/scikit-learn | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', density=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', density=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
waterponey/scikit-learn | examples/cluster/plot_face_ward_segmentation.py | 71 | 2460 | """
=========================================================================
A demo of structured Ward hierarchical clustering on a raccoon face image
=========================================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because the SciPy version is earlier than 0.12.0 "
"and thus does not include the scipy.misc.face() image.")
###############################################################################
# Generate data
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
# Resize it to 10% of the original size to speed up the processing
face = sp.misc.imresize(face, 0.10) / 255.
X = np.reshape(face, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*face.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward',
connectivity=connectivity)
ward.fit(X)
label = np.reshape(ward.labels_, face.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(face, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
Sklearn-HMM/scikit-learn-HMM | sklean-hmm/linear_model/randomized_l1.py | 8 | 22876 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, safe_asarray,
check_arrays, safe_mask)
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set.astype(np.float)
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_arrays(X, y)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch'])(
estimator_func, X, y,
scaling=self.scaling,
n_resampling=self.n_resampling,
n_jobs=self.n_jobs,
verbose=self.verbose,
pre_dispatch=self.pre_dispatch,
random_state=self.random_state,
sample_fraction=self.sample_fraction,
**params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two functions below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return safe_asarray(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article, which corresponds to ``scaling`` here.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
`scores_` : array, shape = [n_features]
Feature scores between 0 and 1.
`all_scores_` : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
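A minimal usage sketch (``X`` and ``y`` here stand in for a feature
matrix and target vector and are not defined in this docstring):
>>> randomized_lasso.fit(X, y)             # doctest: +SKIP
>>> mask = randomized_lasso.get_support()  # doctest: +SKIP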
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Logistic Regression works by resampling the training data and
computing a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=False
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
`scores_` : array, shape = [n_features]
Feature scores between 0 and 1.
`all_scores_` : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
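A minimal usage sketch (``X`` and ``y`` are placeholders for a feature
matrix and class labels, not defined in this docstring):
>>> randomized_logistic.fit(X, y)                 # doctest: +SKIP
>>> X_reduced = randomized_logistic.transform(X)  # doctest: +SKIP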
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
# Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
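Examples
--------
An illustrative call (``X`` and ``y`` stand in for a design matrix and
target vector and are not defined here):
>>> alphas_grid, scores_path = lasso_stability_path(X, y)  # doctest: +SKIP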
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
| bsd-3-clause |
CVML/scikit-learn | sklearn/utils/fixes.py | 133 | 12882 | """Compatibility fixes for older versions of Python, NumPy and SciPy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
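# For illustration: _parse_version('1.8.1') returns (1, 8, 1), while a
# development string such as '1.9.0.dev-1ea1592' returns
# (1, 9, 0, 'dev-1ea1592').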
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
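# Illustrative behaviour of the fallback: expit(0.0) evaluates to 0.5, and
# large positive/negative inputs saturate to 1.0/0.0 because tanh saturates
# instead of overflowing.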
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
# in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
# Numpy < 1.8.0 doesn't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in inspect.getargspec(os.makedirs).args:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
| bsd-3-clause |
Winand/pandas | pandas/core/panel.py | 4 | 55784 | """
Contains data structures designed for manipulating panel (3-dimensional) data
"""
# pylint: disable=E1103,W0231,W0212,W0621
from __future__ import division
import numpy as np
import warnings
from pandas.core.dtypes.cast import (
infer_dtype_from_scalar,
cast_scalar_to_array,
maybe_cast_item)
from pandas.core.dtypes.common import (
is_integer, is_list_like,
is_string_like, is_scalar)
from pandas.core.dtypes.missing import notna
import pandas.core.computation.expressions as expressions
import pandas.core.common as com
import pandas.core.ops as ops
import pandas.core.missing as missing
from pandas import compat
from pandas.compat import (map, zip, range, u, OrderedDict)
from pandas.compat.numpy import function as nv
from pandas.core.common import _try_sort, _default_index
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_get_objs_combined_axis)
from pandas.io.formats.printing import pprint_thing
from pandas.core.indexing import maybe_droplevels
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.ops import _op_descriptions
from pandas.core.series import Series
from pandas.core.reshape.util import cartesian_product
from pandas.util._decorators import (deprecate, Appender)
_shared_doc_kwargs = dict(
axes='items, major_axis, minor_axis',
klass="Panel",
axes_single_arg="{0, 1, 2, 'items', 'major_axis', 'minor_axis'}")
_shared_doc_kwargs['args_transpose'] = ("three positional arguments: each one "
"of\n%s" %
_shared_doc_kwargs['axes_single_arg'])
def _ensure_like_indices(time, panels):
"""
Makes sure that time and panels are conformable
"""
n_time = len(time)
n_panel = len(panels)
u_panels = np.unique(panels) # this sorts!
u_time = np.unique(time)
if len(u_time) == n_time:
time = np.tile(u_time, len(u_panels))
if len(u_panels) == n_panel:
panels = np.repeat(u_panels, len(u_time))
return time, panels
def panel_index(time, panels, names=None):
"""
Returns a multi-index suitable for a panel-like DataFrame
Parameters
----------
time : array-like
Time index, does not have to repeat
panels : array-like
Panel index, does not have to repeat
names : list, optional
List containing the names of the indices
Returns
-------
multi_index : MultiIndex
Time index is the first level, the panels are the second level.
Examples
--------
>>> years = range(1960,1963)
>>> panels = ['A', 'B', 'C']
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'),
(1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'),
(1962, 'C')], dtype=object)
or
>>> import numpy as np
>>> years = np.repeat(range(1960,1963), 3)
>>> panels = np.tile(['A', 'B', 'C'], 3)
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'),
(1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'),
(1962, 'C')], dtype=object)
"""
if names is None:
names = ['time', 'panel']
time, panels = _ensure_like_indices(time, panels)
return MultiIndex.from_arrays([time, panels], sortorder=None, names=names)
class Panel(NDFrame):
"""
Represents wide format panel data, stored as 3-dimensional array
Parameters
----------
data : ndarray (items x major x minor), or dict of DataFrames
items : Index or array-like
axis=0
major_axis : Index or array-like
axis=1
minor_axis : Index or array-like
axis=2
dtype : dtype, default None
Data type to force, otherwise infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
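Examples
--------
A minimal construction sketch; the shape and axis labels below are
purely illustrative:
>>> import numpy as np
>>> from pandas import Panel
>>> data = np.random.randn(2, 3, 4)
>>> wp = Panel(data, items=['Item1', 'Item2'], major_axis=range(3),
...            minor_axis=['a', 'b', 'c', 'd'])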
"""
@property
def _constructor(self):
return type(self)
_constructor_sliced = DataFrame
def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,
copy=False, dtype=None):
# deprecation GH13563
warnings.warn("\nPanel is deprecated and will be removed in a "
"future version.\nThe recommended way to represent "
"these types of 3-dimensional data are with a "
"MultiIndex on a DataFrame, via the "
"Panel.to_frame() method\n"
"Alternatively, you can use the xarray package "
"http://xarray.pydata.org/en/stable/.\n"
"Pandas provides a `.to_xarray()` method to help "
"automate this conversion.\n",
DeprecationWarning, stacklevel=3)
self._init_data(data=data, items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=copy, dtype=dtype)
def _init_data(self, data, copy, dtype, **kwargs):
"""
Generate ND initialization; axes are passed
as required objects to __init__
"""
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
passed_axes = [kwargs.pop(a, None) for a in self._AXIS_ORDERS]
if kwargs:
raise TypeError('_init_data() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
axes = None
if isinstance(data, BlockManager):
if any(x is not None for x in passed_axes):
axes = [x if x is not None else y
for x, y in zip(passed_axes, data.axes)]
mgr = data
elif isinstance(data, dict):
mgr = self._init_dict(data, passed_axes, dtype=dtype)
copy = False
dtype = None
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)
copy = False
dtype = None
elif is_scalar(data) and all(x is not None for x in passed_axes):
values = cast_scalar_to_array([len(x) for x in passed_axes],
data, dtype=dtype)
mgr = self._init_matrix(values, passed_axes, dtype=values.dtype,
copy=False)
copy = False
else: # pragma: no cover
raise ValueError('Panel constructor not properly called!')
NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)
def _init_dict(self, data, axes, dtype=None):
haxis = axes.pop(self._info_axis_number)
# prefilter if haxis passed
if haxis is not None:
haxis = _ensure_index(haxis)
data = OrderedDict((k, v)
for k, v in compat.iteritems(data)
if k in haxis)
else:
ks = list(data.keys())
if not isinstance(data, OrderedDict):
ks = _try_sort(ks)
haxis = Index(ks)
for k, v in compat.iteritems(data):
if isinstance(v, dict):
data[k] = self._constructor_sliced(v)
# extract axis for remaining axes & create the slicemap
raxes = [self._extract_axis(self, data, axis=i) if a is None else a
for i, a in enumerate(axes)]
raxes_sm = self._extract_axes_for_slice(self, raxes)
# shallow copy
arrays = []
haxis_shape = [len(a) for a in raxes]
for h in haxis:
v = values = data.get(h)
if v is None:
values = np.empty(haxis_shape, dtype=dtype)
values.fill(np.nan)
elif isinstance(v, self._constructor_sliced):
d = raxes_sm.copy()
d['copy'] = False
v = v.reindex(**d)
if dtype is not None:
v = v.astype(dtype)
values = v.values
arrays.append(values)
return self._init_arrays(arrays, haxis, [haxis] + raxes)
def _init_arrays(self, arrays, arr_names, axes):
return create_block_manager_from_arrays(arrays, arr_names, axes)
@classmethod
def from_dict(cls, data, intersect=False, orient='items', dtype=None):
"""
Construct Panel from dict of DataFrame objects
Parameters
----------
data : dict
{field : DataFrame}
intersect : boolean
Intersect indexes of input DataFrames
orient : {'items', 'minor'}, default 'items'
The "orientation" of the data. If the keys of the passed dict
should be the items of the result panel, pass 'items'
(default). Otherwise if the columns of the values of the passed
DataFrame objects should be the items (which in the case of
mixed-dtype data you should do), instead pass 'minor'
dtype : dtype, default None
Data type to force, otherwise infer
Returns
-------
Panel
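Examples
--------
An illustrative sketch with two small DataFrames as the items:
>>> import numpy as np
>>> from pandas import DataFrame, Panel
>>> dfs = {'df1': DataFrame(np.random.randn(3, 2)),
...        'df2': DataFrame(np.random.randn(3, 2))}
>>> wp = Panel.from_dict(dfs)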
"""
from collections import defaultdict
orient = orient.lower()
if orient == 'minor':
new_data = defaultdict(OrderedDict)
for col, df in compat.iteritems(data):
for item, s in compat.iteritems(df):
new_data[item][col] = s
data = new_data
elif orient != 'items': # pragma: no cover
raise ValueError('Orientation must be one of {items, minor}.')
d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)
ks = list(d['data'].keys())
if not isinstance(d['data'], OrderedDict):
ks = list(sorted(ks))
d[cls._info_axis_name] = Index(ks)
return cls(**d)
def __getitem__(self, key):
key = com._apply_if_callable(key, self)
if isinstance(self._info_axis, MultiIndex):
return self._getitem_multilevel(key)
if not (is_list_like(key) or isinstance(key, slice)):
return super(Panel, self).__getitem__(key)
return self.loc[key]
def _getitem_multilevel(self, key):
info = self._info_axis
loc = info.get_loc(key)
if isinstance(loc, (slice, np.ndarray)):
new_index = info[loc]
result_index = maybe_droplevels(new_index, key)
slices = [loc] + [slice(None) for x in range(self._AXIS_LEN - 1)]
new_values = self.values[slices]
d = self._construct_axes_dict(self._AXIS_ORDERS[1:])
d[self._info_axis_name] = result_index
result = self._constructor(new_values, **d)
return result
else:
return self._get_item_cache(key)
def _init_matrix(self, data, axes, dtype=None, copy=False):
values = self._prep_ndarray(self, data, copy=copy)
if dtype is not None:
try:
values = values.astype(dtype)
except Exception:
raise ValueError('failed to cast to %s' % dtype)
shape = values.shape
fixed_axes = []
for i, ax in enumerate(axes):
if ax is None:
ax = _default_index(shape[i])
else:
ax = _ensure_index(ax)
fixed_axes.append(ax)
return create_block_manager_from_blocks([values], fixed_axes)
# ----------------------------------------------------------------------
# Comparison methods
def _compare_constructor(self, other, func, try_cast=True):
if not self._indexed_same(other):
raise Exception('Can only compare identically-labeled '
'same type objects')
new_data = {}
for col in self._info_axis:
new_data[col] = func(self[col], other[col])
d = self._construct_axes_dict(copy=False)
return self._constructor(data=new_data, **d)
# ----------------------------------------------------------------------
# Magic methods
def __unicode__(self):
"""
Return a string representation for a particular Panel
Invoked by unicode(df) in py2 only.
Yields a Unicode String in both py2/py3.
"""
class_name = str(self.__class__)
shape = self.shape
dims = u('Dimensions: %s') % ' x '.join(
["%d (%s)" % (s, a) for a, s in zip(self._AXIS_ORDERS, shape)])
def axis_pretty(a):
v = getattr(self, a)
if len(v) > 0:
return u('%s axis: %s to %s') % (a.capitalize(),
pprint_thing(v[0]),
pprint_thing(v[-1]))
else:
return u('%s axis: None') % a.capitalize()
output = '\n'.join(
[class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])
return output
def _get_plane_axes_index(self, axis):
"""
Get my plane axes indexes: these are already
(as compared with higher level planes),
as we are returning a DataFrame axes indexes
"""
axis_name = self._get_axis_name(axis)
if axis_name == 'major_axis':
index = 'minor_axis'
columns = 'items'
if axis_name == 'minor_axis':
index = 'major_axis'
columns = 'items'
elif axis_name == 'items':
index = 'major_axis'
columns = 'minor_axis'
return index, columns
def _get_plane_axes(self, axis):
"""
Get my plane axes indexes: these are already
(as compared with higher level planes),
as we are returning a DataFrame axes
"""
return [self._get_axis(axi)
for axi in self._get_plane_axes_index(axis)]
fromDict = from_dict
def to_sparse(self, *args, **kwargs):
"""
NOT IMPLEMENTED: do not call this method, as sparsifying is not
supported for Panel objects and will raise an error.
Convert to SparsePanel
"""
raise NotImplementedError("sparsifying is not supported "
"for Panel objects")
def to_excel(self, path, na_rep='', engine=None, **kwargs):
"""
Write each DataFrame in Panel to a separate excel sheet
Parameters
----------
path : string or ExcelWriter object
File path or existing ExcelWriter
na_rep : string, default ''
Missing data representation
engine : string, default None
write engine to use - you can also set this via the options
``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
Other Parameters
----------------
float_format : string, default None
Format string for floating point numbers
cols : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : upper left cell row to dump data frame
startcol : upper left cell column to dump data frame
Notes
-----
Keyword arguments (and na_rep) are passed to the ``to_excel`` method
for each DataFrame written.
"""
from pandas.io.excel import ExcelWriter
if isinstance(path, compat.string_types):
writer = ExcelWriter(path, engine=engine)
else:
writer = path
kwargs['na_rep'] = na_rep
for item, df in self.iteritems():
name = str(item)
df.to_excel(writer, name, **kwargs)
writer.save()
def as_matrix(self):
self._consolidate_inplace()
return self._data.as_matrix()
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, *args, **kwargs):
"""
Quickly retrieve single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
takeable : interpret the passed labels as indexers, default False
Returns
-------
value : scalar value
"""
nargs = len(args)
nreq = self._AXIS_LEN
# require an arg for each axis
if nargs != nreq:
raise TypeError('There must be an argument for each axis, you gave'
' {0} args, but {1} are required'.format(nargs,
nreq))
takeable = kwargs.pop('takeable', None)
if kwargs:
raise TypeError('get_value() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
if takeable is True:
lower = self._iget_item_cache(args[0])
else:
lower = self._get_item_cache(args[0])
return lower.get_value(*args[1:], takeable=takeable)
def set_value(self, *args, **kwargs):
"""
Quickly set single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
value : scalar
takeable : interpret the passed labels as indexers, default False
Returns
-------
panel : Panel
If label combo is contained, will be reference to calling Panel,
otherwise a new object
"""
# require an arg for each axis and the value
nargs = len(args)
nreq = self._AXIS_LEN + 1
if nargs != nreq:
raise TypeError('There must be an argument for each axis plus the '
'value provided, you gave {0} args, but {1} are '
'required'.format(nargs, nreq))
takeable = kwargs.pop('takeable', None)
if kwargs:
raise TypeError('set_value() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
try:
if takeable is True:
lower = self._iget_item_cache(args[0])
else:
lower = self._get_item_cache(args[0])
lower.set_value(*args[1:], takeable=takeable)
return self
except KeyError:
axes = self._expand_axes(args)
d = self._construct_axes_dict_from(self, axes, copy=False)
result = self.reindex(**d)
args = list(args)
likely_dtype, args[-1] = infer_dtype_from_scalar(args[-1])
made_bigger = not np.array_equal(axes[0], self._info_axis)
# how to make this logic simpler?
if made_bigger:
maybe_cast_item(result, args[0], likely_dtype)
return result.set_value(*args)
def _box_item_values(self, key, values):
if self.ndim == values.ndim:
result = self._constructor(values)
# a dup selection will yield a full ndim
if result._get_axis(0).is_unique:
result = result[key]
return result
d = self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:])
return self._constructor_sliced(values, **d)
def __setitem__(self, key, value):
key = com._apply_if_callable(key, self)
shape = tuple(self.shape)
if isinstance(value, self._constructor_sliced):
value = value.reindex(
**self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))
mat = value.values
elif isinstance(value, np.ndarray):
if value.shape != shape[1:]:
raise ValueError('shape of value must be {0}, shape of given '
'object was {1}'.format(
shape[1:], tuple(map(int, value.shape))))
mat = np.asarray(value)
elif is_scalar(value):
mat = cast_scalar_to_array(shape[1:], value)
else:
raise TypeError('Cannot set item of type: %s' % str(type(value)))
mat = mat.reshape(tuple([1]) + shape[1:])
NDFrame._set_item(self, key, mat)
def _unpickle_panel_compat(self, state): # pragma: no cover
"Unpickle the panel"
_unpickle = com._unpickle_array
vals, items, major, minor = state
items = _unpickle(items)
major = _unpickle(major)
minor = _unpickle(minor)
values = _unpickle(vals)
wp = Panel(values, items, major, minor)
self._data = wp._data
def conform(self, frame, axis='items'):
"""
Conform input DataFrame to align with chosen axis pair.
Parameters
----------
frame : DataFrame
axis : {'items', 'major', 'minor'}
Axis the input corresponds to. E.g., if axis='major', then
the frame's columns would be items, and the index would be
values of the minor axis
Returns
-------
DataFrame
"""
axes = self._get_plane_axes(axis)
return frame.reindex(**self._extract_axes_for_slice(self, axes))
def head(self, n=5):
raise NotImplementedError
def tail(self, n=5):
raise NotImplementedError
def round(self, decimals=0, *args, **kwargs):
"""
Round each value in Panel to a specified number of decimal places.
.. versionadded:: 0.18.0
Parameters
----------
decimals : int
Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of
positions to the left of the decimal point.
Returns
-------
Panel object
See Also
--------
numpy.around
"""
nv.validate_round(args, kwargs)
if is_integer(decimals):
result = np.apply_along_axis(np.round, 0, self.values)
return self._wrap_result(result, axis=0)
raise TypeError("decimals must be an integer")
def _needs_reindex_multi(self, axes, method, level):
""" don't allow a multi reindex on Panel or above ndim """
return False
def align(self, other, **kwargs):
raise NotImplementedError
def dropna(self, axis=0, how='any', inplace=False):
"""
Drop 2D from panel, holding passed axis constant
Parameters
----------
axis : int, default 0
Axis to hold constant. E.g. axis=1 will drop major_axis entries
having a certain amount of NA data
how : {'all', 'any'}, default 'any'
'any': one or more values are NA in the DataFrame along the
axis. For 'all' they all must be.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
dropped : Panel
"""
axis = self._get_axis_number(axis)
values = self.values
mask = notna(values)
for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))):
mask = mask.sum(ax)
per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])
if how == 'all':
cond = mask > 0
else:
cond = mask == per_slice
new_ax = self._get_axis(axis)[cond]
result = self.reindex_axis(new_ax, axis=axis)
if inplace:
self._update_inplace(result)
else:
return result
def _combine(self, other, func, axis=0):
if isinstance(other, Panel):
return self._combine_panel(other, func)
elif isinstance(other, DataFrame):
return self._combine_frame(other, func, axis=axis)
elif is_scalar(other):
return self._combine_const(other, func)
else:
raise NotImplementedError("%s is not supported in combine "
"operation with %s" %
(str(type(other)), str(type(self))))
def _combine_const(self, other, func, try_cast=True):
with np.errstate(all='ignore'):
new_values = func(self.values, other)
d = self._construct_axes_dict()
return self._constructor(new_values, **d)
def _combine_frame(self, other, func, axis=0, try_cast=True):
index, columns = self._get_plane_axes(axis)
axis = self._get_axis_number(axis)
other = other.reindex(index=index, columns=columns)
with np.errstate(all='ignore'):
if axis == 0:
new_values = func(self.values, other.values)
elif axis == 1:
new_values = func(self.values.swapaxes(0, 1), other.values.T)
new_values = new_values.swapaxes(0, 1)
elif axis == 2:
new_values = func(self.values.swapaxes(0, 2), other.values)
new_values = new_values.swapaxes(0, 2)
return self._constructor(new_values, self.items, self.major_axis,
self.minor_axis)
def _combine_panel(self, other, func, try_cast=True):
items = self.items.union(other.items)
major = self.major_axis.union(other.major_axis)
minor = self.minor_axis.union(other.minor_axis)
# could check that everything's the same size, but forget it
this = self.reindex(items=items, major=major, minor=minor)
other = other.reindex(items=items, major=major, minor=minor)
with np.errstate(all='ignore'):
result_values = func(this.values, other.values)
return self._constructor(result_values, items, major, minor)
def major_xs(self, key):
"""
Return slice of panel along major axis
Parameters
----------
key : object
Major axis label
Returns
-------
y : DataFrame
index -> minor axis, columns -> items
Notes
-----
major_xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of major_xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
return self.xs(key, axis=self._AXIS_LEN - 2)
def minor_xs(self, key):
"""
Return slice of panel along minor axis
Parameters
----------
key : object
Minor axis label
Returns
-------
y : DataFrame
index -> major axis, columns -> items
Notes
-----
minor_xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of minor_xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
return self.xs(key, axis=self._AXIS_LEN - 1)
def xs(self, key, axis=1):
"""
Return slice of panel along selected axis
Parameters
----------
key : object
Label
axis : {'items', 'major', 'minor'}, default 1/'major'
Returns
-------
y : ndim(self)-1
Notes
-----
xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
axis = self._get_axis_number(axis)
if axis == 0:
return self[key]
self._consolidate_inplace()
axis_number = self._get_axis_number(axis)
new_data = self._data.xs(key, axis=axis_number, copy=False)
result = self._construct_return_type(new_data)
copy = new_data.is_mixed_type
result._set_is_copy(self, copy=copy)
return result
_xs = xs
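# A minimal usage sketch of the cross-section helpers, assuming a
# hypothetical Panel ``p`` (items x major_axis x minor_axis):
# >>> p = pd.Panel(np.random.randn(2, 5, 4))
# >>> p.major_xs(p.major_axis[0])      # DataFrame: minor_axis x items
# >>> p.minor_xs(p.minor_axis[0])      # DataFrame: major_axis x items
# >>> p.xs(p.items[0], axis='items')   # same as p[p.items[0]]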
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
ax = self._get_axis(axis)
key = ax[i]
# xs cannot handle a non-scalar key, so just reindex here
# if we have a multi-index and a single tuple, then its a reduction
# (GH 7516)
if not (isinstance(ax, MultiIndex) and isinstance(key, tuple)):
if is_list_like(key):
indexer = {self._get_axis_name(axis): key}
return self.reindex(**indexer)
# a reduction
if axis == 0:
values = self._data.iget(i)
return self._box_item_values(key, values)
# xs by position
self._consolidate_inplace()
new_data = self._data.xs(i, axis=axis, copy=True, takeable=True)
return self._construct_return_type(new_data)
def groupby(self, function, axis='major'):
"""
Group data on given axis, returning GroupBy object
Parameters
----------
function : callable
Mapping function for chosen axis
axis : {'major', 'minor', 'items'}, default 'major'
Returns
-------
grouped : PanelGroupBy
"""
from pandas.core.groupby import PanelGroupBy
axis = self._get_axis_number(axis)
return PanelGroupBy(self, function, axis=axis)
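# A minimal usage sketch, assuming a hypothetical Panel ``p`` whose
# major_axis is a DatetimeIndex; grouping by calendar year, after which
# aggregation proceeds as with other GroupBy objects:
# >>> p = pd.Panel(np.random.randn(2, 365, 3),
# ...              major_axis=pd.date_range('2000-01-01', periods=365))
# >>> grouped = p.groupby(lambda d: d.year, axis='major')
# >>> grouped.aggregate(np.mean)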
def to_frame(self, filter_observations=True):
"""
Transform wide format into long (stacked) format as DataFrame whose
columns are the Panel's items and whose index is a MultiIndex formed
of the Panel's major and minor axes.
Parameters
----------
filter_observations : boolean, default True
Drop (major, minor) pairs without a complete set of observations
across all the items
Returns
-------
y : DataFrame
"""
_, N, K = self.shape
if filter_observations:
# shaped like the return DataFrame
mask = notna(self.values).all(axis=0)
# size = mask.sum()
selector = mask.ravel()
else:
# size = N * K
selector = slice(None, None)
data = {}
for item in self.items:
data[item] = self[item].values.ravel()[selector]
def construct_multi_parts(idx, n_repeat, n_shuffle=1):
axis_idx = idx.to_hierarchical(n_repeat, n_shuffle)
labels = [x[selector] for x in axis_idx.labels]
levels = axis_idx.levels
names = axis_idx.names
return labels, levels, names
def construct_index_parts(idx, major=True):
levels = [idx]
if major:
labels = [np.arange(N).repeat(K)[selector]]
names = idx.name or 'major'
else:
labels = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]
labels = [labels.ravel()[selector]]
names = idx.name or 'minor'
names = [names]
return labels, levels, names
if isinstance(self.major_axis, MultiIndex):
major_labels, major_levels, major_names = construct_multi_parts(
self.major_axis, n_repeat=K)
else:
major_labels, major_levels, major_names = construct_index_parts(
self.major_axis)
if isinstance(self.minor_axis, MultiIndex):
minor_labels, minor_levels, minor_names = construct_multi_parts(
self.minor_axis, n_repeat=N, n_shuffle=K)
else:
minor_labels, minor_levels, minor_names = construct_index_parts(
self.minor_axis, major=False)
levels = major_levels + minor_levels
labels = major_labels + minor_labels
names = major_names + minor_names
index = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
return DataFrame(data, index=index, columns=self.items)
to_long = deprecate('to_long', to_frame)
toLong = deprecate('toLong', to_frame)
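# A minimal usage sketch, assuming a hypothetical Panel ``p``:
# >>> p = pd.Panel(np.random.randn(2, 5, 4))
# >>> long_df = p.to_frame()   # MultiIndex (major, minor) x items
# >>> long_df.to_panel()       # round-trips back to a Panel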
def apply(self, func, axis='major', **kwargs):
"""
Applies function along axis (or axes) of the Panel
Parameters
----------
func : function
Function to apply to each combination of 'other' axes
e.g. if axis = 'items', each major_axis/minor_axis combination
will be passed to func as a Series; if axis = ('items', 'major'),
DataFrames of items & major axis will be passed
axis : {'items', 'minor', 'major'}, or {0, 1, 2}, or a tuple with two
axes
Additional keyword arguments will be passed as keywords to the function
Examples
--------
Returns a Panel with the square root of each element
>>> p = pd.Panel(np.random.rand(4,3,2))
>>> p.apply(np.sqrt)
Equivalent to p.sum(1), returning a DataFrame
>>> p.apply(lambda x: x.sum(), axis=1)
Equivalent to previous:
>>> p.apply(lambda x: x.sum(), axis='minor')
Return the shapes of each DataFrame over axis 2 (i.e. the shapes of
items x major), as a Series
>>> p.apply(lambda x: x.shape, axis=(0,1))
Returns
-------
result : Panel, DataFrame, or Series
"""
if kwargs and not isinstance(func, np.ufunc):
f = lambda x: func(x, **kwargs)
else:
f = func
# 2d-slabs
if isinstance(axis, (tuple, list)) and len(axis) == 2:
return self._apply_2d(f, axis=axis)
axis = self._get_axis_number(axis)
# try ufunc like
if isinstance(f, np.ufunc):
try:
with np.errstate(all='ignore'):
result = np.apply_along_axis(func, axis, self.values)
return self._wrap_result(result, axis=axis)
except (AttributeError):
pass
# 1d
return self._apply_1d(f, axis=axis)
def _apply_1d(self, func, axis):
axis_name = self._get_axis_name(axis)
ndim = self.ndim
values = self.values
# iter thru the axes
slice_axis = self._get_axis(axis)
slice_indexer = [0] * (ndim - 1)
indexer = np.zeros(ndim, 'O')
indlist = list(range(ndim))
indlist.remove(axis)
indexer[axis] = slice(None, None)
indexer.put(indlist, slice_indexer)
planes = [self._get_axis(axi) for axi in indlist]
shape = np.array(self.shape).take(indlist)
# all the iteration points
points = cartesian_product(planes)
results = []
for i in range(np.prod(shape)):
# construct the object
pts = tuple([p[i] for p in points])
indexer.put(indlist, slice_indexer)
obj = Series(values[tuple(indexer)], index=slice_axis, name=pts)
result = func(obj)
results.append(result)
# increment the indexer
slice_indexer[-1] += 1
n = -1
while (slice_indexer[n] >= shape[n]) and (n > (1 - ndim)):
slice_indexer[n - 1] += 1
slice_indexer[n] = 0
n -= 1
# empty object
if not len(results):
return self._constructor(**self._construct_axes_dict())
# same ndim as current
if isinstance(results[0], Series):
arr = np.vstack([r.values for r in results])
arr = arr.T.reshape(tuple([len(slice_axis)] + list(shape)))
tranp = np.array([axis] + indlist).argsort()
arr = arr.transpose(tuple(list(tranp)))
return self._constructor(arr, **self._construct_axes_dict())
# ndim-1 shape
results = np.array(results).reshape(shape)
if results.ndim == 2 and axis_name != self._info_axis_name:
results = results.T
planes = planes[::-1]
return self._construct_return_type(results, planes)
def _apply_2d(self, func, axis):
""" handle 2-d slices, equiv to iterating over the other axis """
ndim = self.ndim
axis = [self._get_axis_number(a) for a in axis]
# construct slabs, in 2-d this is a DataFrame result
indexer_axis = list(range(ndim))
for a in axis:
indexer_axis.remove(a)
indexer_axis = indexer_axis[0]
slicer = [slice(None, None)] * ndim
ax = self._get_axis(indexer_axis)
results = []
for i, e in enumerate(ax):
slicer[indexer_axis] = i
sliced = self.iloc[tuple(slicer)]
obj = func(sliced)
results.append((e, obj))
return self._construct_return_type(dict(results))
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
if numeric_only:
raise NotImplementedError('Panel.{0} does not implement '
'numeric_only.'.format(name))
axis_name = self._get_axis_name(axis)
axis_number = self._get_axis_number(axis_name)
f = lambda x: op(x, axis=axis_number, skipna=skipna, **kwds)
with np.errstate(all='ignore'):
result = f(self.values)
axes = self._get_plane_axes(axis_name)
if result.ndim == 2 and axis_name != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
def _construct_return_type(self, result, axes=None):
""" return the type for the ndim of the result """
ndim = getattr(result, 'ndim', None)
# need to assume they are the same
if ndim is None:
if isinstance(result, dict):
ndim = getattr(list(compat.itervalues(result))[0], 'ndim', 0)
# have a dict, so top-level is +1 dim
if ndim != 0:
ndim += 1
# scalar
if ndim == 0:
return Series(result)
# same as self
elif self.ndim == ndim:
# return the construction dictionary for these axes
if axes is None:
return self._constructor(result)
return self._constructor(result, **self._construct_axes_dict())
# sliced
elif self.ndim == ndim + 1:
if axes is None:
return self._constructor_sliced(result)
return self._constructor_sliced(
result, **self._extract_axes_for_slice(self, axes))
raise ValueError('invalid _construct_return_type [self->%s] '
'[result->%s]' % (self, result))
def _wrap_result(self, result, axis):
axis = self._get_axis_name(axis)
axes = self._get_plane_axes(axis)
if result.ndim == 2 and axis != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, items=None, major_axis=None, minor_axis=None, **kwargs):
major_axis = (major_axis if major_axis is not None else
kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None else
kwargs.pop('minor', None))
return super(Panel, self).reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, **kwargs)
@Appender(_shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs):
major_axis = (major_axis if major_axis is not None else
kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None else
kwargs.pop('minor', None))
return super(Panel, self).rename(items=items, major_axis=major_axis,
minor_axis=minor_axis, **kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(Panel, self).reindex_axis(labels=labels, axis=axis,
method=method, level=level,
copy=copy, limit=limit,
fill_value=fill_value)
@Appender(_shared_docs['transpose'] % _shared_doc_kwargs)
def transpose(self, *args, **kwargs):
# check if a list of axes was passed in instead as a
# single *args element
if (len(args) == 1 and hasattr(args[0], '__iter__') and
not is_string_like(args[0])):
axes = args[0]
else:
axes = args
if 'axes' in kwargs and axes:
raise TypeError("transpose() got multiple values for "
"keyword argument 'axes'")
elif not axes:
axes = kwargs.pop('axes', ())
return super(Panel, self).transpose(*axes, **kwargs)
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(Panel, self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
def count(self, axis='major'):
"""
Return number of observations over requested axis.
Parameters
----------
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
count : DataFrame
"""
i = self._get_axis_number(axis)
values = self.values
mask = np.isfinite(values)
result = mask.sum(axis=i, dtype='int64')
return self._wrap_result(result, axis)
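# A minimal usage sketch, assuming a hypothetical Panel ``p`` with a few
# missing values:
# >>> vals = np.random.randn(2, 5, 4)
# >>> vals[0, 0, :] = np.nan
# >>> p = pd.Panel(vals)
# >>> p.count(axis='major')   # DataFrame of non-NA counts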
def shift(self, periods=1, freq=None, axis='major'):
"""
Shift index by desired number of periods with an optional time freq.
The shifted data will not include the dropped periods and the
shifted axis will be smaller than the original. This is different
from the behavior of DataFrame.shift()
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, optional
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
shifted : Panel
"""
if freq:
return self.tshift(periods, freq, axis=axis)
return super(Panel, self).slice_shift(periods, axis=axis)
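# A minimal usage sketch, assuming a hypothetical Panel ``p`` with a
# datetime major_axis:
# >>> p = pd.Panel(np.random.randn(2, 5, 3),
# ...              major_axis=pd.date_range('2000-01-01', periods=5))
# >>> p.shift(1, axis='major')             # drops the first period
# >>> p.shift(1, freq='D', axis='major')   # re-labels the axis instead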
def tshift(self, periods=1, freq=None, axis='major'):
return super(Panel, self).tshift(periods, freq, axis)
def join(self, other, how='left', lsuffix='', rsuffix=''):
"""
Join items with other Panel on major and minor axes
Parameters
----------
other : Panel or list of Panels
Index should be similar to one of the columns in this one
how : {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle indexes of the two objects:
* left: use calling frame's index
* right: use input frame's index
* outer: form union of indexes
* inner: use intersection of indexes
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
Returns
-------
joined : Panel
"""
from pandas.core.reshape.concat import concat
if isinstance(other, Panel):
join_major, join_minor = self._get_join_index(other, how)
this = self.reindex(major=join_major, minor=join_minor)
other = other.reindex(major=join_major, minor=join_minor)
merged_data = this._data.merge(other._data, lsuffix, rsuffix)
return self._constructor(merged_data)
else:
if lsuffix or rsuffix:
raise ValueError('Suffixes not supported when passing '
'multiple panels')
if how == 'left':
how = 'outer'
join_axes = [self.major_axis, self.minor_axis]
elif how == 'right':
raise ValueError('Right join not supported with multiple '
'panels')
else:
join_axes = None
return concat([self] + list(other), axis=0, join=how,
join_axes=join_axes, verify_integrity=True)
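# A minimal usage sketch, assuming two hypothetical Panels that share
# their major/minor axes but hold different items:
# >>> left = pd.Panel(np.random.randn(1, 4, 3), items=['a'])
# >>> right = pd.Panel(np.random.randn(1, 4, 3), items=['b'])
# >>> left.join(right, how='inner')   # items 'a' and 'b', axes intersected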
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify Panel in place using non-NA values from passed
Panel, or object coercible to Panel. Aligns on items
Parameters
----------
other : Panel, or object coercible to Panel
join : {'left', 'right', 'outer', 'inner'}, default 'left'
How to join individual DataFrames
overwrite : boolean, default True
If True then overwrite values for common keys in the calling panel
filter_func : callable(1d-array) -> 1d-array<boolean>, default None
Can choose to replace values other than NA. Return True for values
that should be updated
raise_conflict : bool
If True, will raise an error if a DataFrame and other both
contain data in the same place.
"""
if not isinstance(other, self._constructor):
other = self._constructor(other)
axis_name = self._info_axis_name
axis_values = self._info_axis
other = other.reindex(**{axis_name: axis_values})
for frame in axis_values:
self[frame].update(other[frame], join, overwrite, filter_func,
raise_conflict)
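# A minimal usage sketch, assuming hypothetical Panels ``p`` (with a NaN)
# and ``other`` (fully observed):
# >>> vals = np.random.randn(2, 3, 3)
# >>> vals[0, 0, 0] = np.nan
# >>> p = pd.Panel(vals)
# >>> other = pd.Panel(np.zeros((2, 3, 3)))
# >>> p.update(other, overwrite=False)   # in place: fill only NaNs in p
# >>> p.update(other)                    # overwrite all aligned cells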
def _get_join_index(self, other, how):
if how == 'left':
join_major, join_minor = self.major_axis, self.minor_axis
elif how == 'right':
join_major, join_minor = other.major_axis, other.minor_axis
elif how == 'inner':
join_major = self.major_axis.intersection(other.major_axis)
join_minor = self.minor_axis.intersection(other.minor_axis)
elif how == 'outer':
join_major = self.major_axis.union(other.major_axis)
join_minor = self.minor_axis.union(other.minor_axis)
return join_major, join_minor
# miscellaneous data creation
@staticmethod
def _extract_axes(self, data, axes, **kwargs):
""" return a list of the axis indicies """
return [self._extract_axis(self, data, axis=i, **kwargs)
for i, a in enumerate(axes)]
@staticmethod
def _extract_axes_for_slice(self, axes):
""" return the slice dictionary for these axes """
return dict([(self._AXIS_SLICEMAP[i], a)
for i, a in zip(
self._AXIS_ORDERS[self._AXIS_LEN - len(axes):],
axes)])
@staticmethod
def _prep_ndarray(self, values, copy=True):
if not isinstance(values, np.ndarray):
values = np.asarray(values)
# NumPy strings are a pain, convert to object
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object, copy=True)
else:
if copy:
values = values.copy()
if values.ndim != self._AXIS_LEN:
raise ValueError("The number of dimensions required is {0}, "
"but the number of dimensions of the "
"ndarray given was {1}".format(self._AXIS_LEN,
values.ndim))
return values
@staticmethod
def _homogenize_dict(self, frames, intersect=True, dtype=None):
"""
Conform set of _constructor_sliced-like objects to either
an intersection of indices / columns or a union.
Parameters
----------
frames : dict
intersect : boolean, default True
Returns
-------
dict of aligned results & indices
"""
result = dict()
# the caller may pass a dict or an OrderedDict; preserve that type
if isinstance(frames, OrderedDict):
result = OrderedDict()
adj_frames = OrderedDict()
for k, v in compat.iteritems(frames):
if isinstance(v, dict):
adj_frames[k] = self._constructor_sliced(v)
else:
adj_frames[k] = v
axes = self._AXIS_ORDERS[1:]
axes_dict = dict([(a, ax) for a, ax in zip(axes, self._extract_axes(
self, adj_frames, axes, intersect=intersect))])
reindex_dict = dict(
[(self._AXIS_SLICEMAP[a], axes_dict[a]) for a in axes])
reindex_dict['copy'] = False
for key, frame in compat.iteritems(adj_frames):
if frame is not None:
result[key] = frame.reindex(**reindex_dict)
else:
result[key] = None
axes_dict['data'] = result
axes_dict['dtype'] = dtype
return axes_dict
@staticmethod
def _extract_axis(self, data, axis=0, intersect=False):
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
have_raw_arrays = False
have_frames = False
for v in data.values():
if isinstance(v, self._constructor_sliced):
have_frames = True
elif v is not None:
have_raw_arrays = True
raw_lengths.append(v.shape[axis])
if have_frames:
index = _get_objs_combined_axis(data.values(), axis=axis,
intersect=intersect)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('ndarrays must match shape on axis %d' % axis)
if have_frames:
if lengths[0] != len(index):
raise AssertionError('Length of data and index must match')
else:
index = Index(np.arange(lengths[0]))
if index is None:
index = Index([])
return _ensure_index(index)
@classmethod
def _add_aggregate_operations(cls, use_numexpr=True):
""" add the operations to the cls; evaluate the doc strings again """
# doc string substitutions
_agg_doc = """
Wrapper method for %%s
Parameters
----------
other : %s or %s""" % (cls._constructor_sliced.__name__, cls.__name__) + """
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """
Axis to broadcast over
Returns
-------
""" + cls.__name__ + "\n"
def _panel_arith_method(op, name, str_rep=None, default_axis=None,
fill_zeros=None, **eval_kwargs):
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True,
**eval_kwargs)
except TypeError:
result = op(x, y)
# handles discrepancy between numpy and numexpr on division/mod
# by 0 though, given that these are generally (always?)
# non-scalars, I'm not sure whether it's worth it at the moment
result = missing.fill_zeros(result, x, y, name, fill_zeros)
return result
if name in _op_descriptions:
op_name = name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' panel'
else:
equiv = 'panel ' + op_desc['op'] + ' other'
_op_doc = """
%%s of panel and other, element-wise (binary operator `%%s`).
Equivalent to ``%%s``.
Parameters
----------
other : %s or %s""" % (cls._constructor_sliced.__name__,
cls.__name__) + """
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """
Axis to broadcast over
Returns
-------
""" + cls.__name__ + """
See also
--------
""" + cls.__name__ + ".%s\n"
doc = _op_doc % (op_desc['desc'], op_name, equiv,
op_desc['reverse'])
else:
doc = _agg_doc % name
@Appender(doc)
def f(self, other, axis=0):
return self._combine(other, na_op, axis=axis)
f.__name__ = name
return f
# add `div`, `mul`, `pow`, etc..
ops.add_flex_arithmetic_methods(
cls, _panel_arith_method, use_numexpr=use_numexpr,
flex_comp_method=ops._comp_method_PANEL)
Panel._setup_axes(axes=['items', 'major_axis', 'minor_axis'], info_axis=0,
stat_axis=1, aliases={'major': 'major_axis',
'minor': 'minor_axis'},
slicers={'major_axis': 'index',
'minor_axis': 'columns'})
ops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs)
Panel._add_aggregate_operations()
Panel._add_numeric_operations()
# legacy
class WidePanel(Panel):
def __init__(self, *args, **kwargs):
# deprecation, #10892
warnings.warn("WidePanel is deprecated. Please use Panel",
FutureWarning, stacklevel=2)
super(WidePanel, self).__init__(*args, **kwargs)
class LongPanel(DataFrame):
def __init__(self, *args, **kwargs):
# deprecation, #10892
warnings.warn("LongPanel is deprecated. Please use DataFrame",
FutureWarning, stacklevel=2)
super(LongPanel, self).__init__(*args, **kwargs)
| bsd-3-clause |
FAB4D/humanitas | analysis/preproc/downsample.py | 1 | 2802 | import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from df_build_func import get_all_dates, mod_header
from time import time
description = '''
This script downsamples a daily dataset to a weekly one (on Fridays).
It then applies NaN analysis to the result.
Author: Ching-Chia
'''
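# Note: the Sat-to-Fri averaging loop below is, in spirit, a Friday-anchored
# weekly resample. A minimal sketch of that alternative, assuming a single
# daily price series (hypothetical; the script itself averages per commodity
# group and does not use this):
# >>> s = df.set_index('date')['price']
# >>> s.resample('W-FRI').mean()   # pandas >= 0.18 syntax; not used here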
# csv_in = os.getcwd()+'/../../data/india/csv_preprocessed/preproc_wholesale_daily/india_original_wholesale_daily_0.4.csv'
# csv_out = os.getcwd()+'/../../data/india/csv_preprocessed/india_original_wholesale_weekly_0.4_downsampled.csv'
# csv_in = os.getcwd()+'/india_original_wholesale_daily_0.6.csv'
# csv_out = os.getcwd()+'/india_original_wholesale_weekly_0.6_downsampled.csv'
csv_in = os.getcwd()+'/india_original_retail_daily_0.4.csv'
csv_out = os.getcwd()+'/india_original_retail_weekly_0.4_downsampled.csv'
#set_first_friday = '2005-01-07' #for wholesale daily
set_first_friday = '2009-01-02' #for retail daily
# def downsample(df):
if __name__ == "__main__":
start_time = time()
df = pd.read_csv(csv_in)
df['date'] = pd.to_datetime(df['date'])
all_dates = sorted(list(set(df['date'])))
print 'date starting from', all_dates[0]
first_friday = pd.to_datetime(set_first_friday, format='%Y-%m-%d')
idx_first_friday = all_dates.index(first_friday)
print 'please verify first friday ==', set_first_friday
ddf = pd.DataFrame()
# ddf_ts = pd.DataFrame()
rows = []
count = 0
df.set_index('date',inplace=True)
for (state, city, product, subproduct, country, freq), group in \
df.groupby(['state', 'city','product','subproduct', 'country', 'freq']):
print (state, city, product, subproduct, country, freq)
#sum up all dates from Sat to Fri
#count non-NaNs
#take the average
i = idx_first_friday+1
# pieces = []
while(i < len(all_dates)):
if(i+7 > len(all_dates)):
break
j = i+7
avg_price = group.loc[all_dates[i:j]].mean(skipna=True).price
friday = all_dates[j-1]
#for ddf_ts
# pieces.append(pd.DataFrame([avg_price], index=[friday]))
#for ddf
row = group.loc[[friday],:].copy() #small trick to return DataFrame instead of Series
row.price = avg_price
row.reset_index(inplace=True)
row.columns = mod_header(row.columns)
rows.append(row)
# print all_dates[i:j]
i += 7
# ddf_ts[(state, city, product, subproduct)] = pd.concat(pieces)[0]
count += 1
ddf = pd.concat(rows)
ddf['date'] = pd.to_datetime(ddf['date'])
ddf = ddf.sort('date')
ddf.to_csv(csv_out, index=False)
print 'Elapsed time:', (time()-start_time)/60., 'minutes'
| bsd-3-clause |
belltailjp/scikit-learn | sklearn/tests/test_dummy.py | 129 | 17774 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba(X[0]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba(X[0]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategey_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
| bsd-3-clause |
walterreade/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 43 | 39945 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert_true(np.any(deviance_decrease >= 0.0))
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
for presort, loss in product(('auto', True, False),
('deviance', 'exponential')):
yield check_classification_toy, presort, loss
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2 * ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=2,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
# `Bagging Predictors`, Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 2, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=2, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = (
clf.feature_importances_ > clf.feature_importances_.mean())
assert_array_almost_equal(X_new, X[:, feature_mask])
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
    # hard-coded regression test - update if the OOB computation changes
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
    # hard-coded regression test - update if the OOB computation changes
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
    # one line per iteration for iterations 1-10, then one every 10 iterations for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
        gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0,
max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
    if issubclass(EstimatorClass, GradientBoostingClassifier):
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
@skip_if_32bit
def test_sparse_input():
ests = (GradientBoostingClassifier, GradientBoostingRegressor)
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
| bsd-3-clause |
AlexanderFabisch/scikit-learn | examples/gaussian_process/plot_gpc_isoprobability.py | 45 | 3025 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=================================================================
Iso-probability lines for Gaussian Processes classification (GPC)
=================================================================
A two-dimensional classification example showing iso-probability lines for
the predicted probabilities.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Adapted to GaussianProcessClassifier:
# Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from matplotlib import pyplot as pl
from matplotlib import cm
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import DotProduct, ConstantKernel as C
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = np.array(g(X) > 0, dtype=int)
# Instantiate and fit Gaussian Process Model
kernel = C(0.1, (1e-5, np.inf)) * DotProduct(sigma_0=0.1) ** 2
gp = GaussianProcessClassifier(kernel=kernel)
gp.fit(X, y)
print("Learned kernel: %s " % gp.kernel_)
# Evaluate real function and the predicted probability
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_prob = gp.predict_proba(xx)[:, 1]
y_true = y_true.reshape((res, res))
y_prob = y_prob.reshape((res, res))
# Plot the probabilistic classification iso-values
fig = pl.figure(1)
ax = fig.gca()
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
cax = pl.imshow(y_prob, cmap=cm.gray_r, alpha=0.8,
extent=(-lim, lim, -lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
pl.clim(0, 1)
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, y_prob, [0.666], colors='b',
linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, y_prob, [0.5], colors='k',
linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, y_prob, [0.334], colors='r',
linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
| bsd-3-clause |
abitofalchemy/hrg_nets | pami_pgd_fast.py | 1 | 4043 | #!/usr/bin/env python
__version__="0.1.0"
__author__ = 'tweninge'
__author__ = 'saguinag'
# Version: 0.1.0 Forked from cikm_experiments.py
#
# ToDo:
#
import cPickle
import subprocess
import traceback
import argparse
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import networkx as nx
import pandas as pd
import pprint as pp
import multiprocessing as mp
from multiprocessing import Process
from glob import glob
results = []
def get_parser ():
parser = argparse.ArgumentParser(description='Pami compute the metrics HRG, PHRG, ' +\
'CLGM, KPGM.\nexample: \n'+ \
' python pami.py --orig ./datasets/out.radoslaw_email_email --netstat')
parser.add_argument('--orig', nargs=1, required=True, help="Reference (edgelist) input file.")
parser.add_argument('--stats', action='store_true', default=0, required=0, \
help="print xphrg mcs tw")
parser.add_argument('--version', action='version', version=__version__)
return parser
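# run_pgd_on_edgelist: invoke the external PGD graphlet-counting binary on an
# edgelist file and write the macro-level counts to
# Results_Graphlets/<graph_name>_<edgelist basename>.macro, retrying until the
# binary produces some output.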
def run_pgd_on_edgelist(fname,graph_name):
import platform
if platform.system() == "Linux":
args = ("bin/linux/pgd", "-f", "{}".format(fname), "--macro", "Results_Graphlets/{}_{}.macro".format(graph_name, \
os.path.basename(fname)))
print args
else:
args = ("bin/macos/pgd", "-f {}".format(fname), "--macro Results_Graphlets/{}_{}.macro".format(graph_name, fname))
pgdout = ""
while not pgdout:
popen = subprocess.Popen(args, stdout=subprocess.PIPE)
popen.wait()
output = popen.stdout.read()
pgdout = output.split('\n')
print "[o]"
return
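# hstar_nxto_tsv: dump a NetworkX graph to a temporary comma-delimited edgelist
# under /tmp and hand that file to run_pgd_on_edgelist; returns the temporary
# file handle.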
def hstar_nxto_tsv(G,gname, ix):
import tempfile
with tempfile.NamedTemporaryFile(dir='/tmp', delete=False) as tmpfile:
tmp_fname = tmpfile
nx.write_edgelist(G, tmp_fname, delimiter=",")
if os.path.exists(tmp_fname.name):
run_pgd_on_edgelist(tmp_fname.name, gname)
else:
print "{} file does not exists".format(tmp_fname)
return tmp_fname
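# graphlet_stats: collect the *.macro files that PGD produced for this graph
# and print the mean, standard deviation, standard error and count for a fixed
# set of graphlet totals (triangles, 2-stars, 4-cliques, 4-cycles, ...).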
def graphlet_stats(args):
from vador.graph_name import graph_name
    gname = graph_name(args['orig'][0])
print "==>", gname
rows_2keep =["total_3_tris",
"total_2_star",
"total_4_clique",
"total_4_chordcycle",
"total_4_tailed_tris",
"total_4_cycle",
"total_3_star",
"total_4_path"]
files = glob("Results_Graphlets/{}*.macro".format(gname))
if len(files) ==0:
print "!!!> no macro files"
return
mdf = pd.DataFrame()
for j,f in enumerate(files):
df = pd.read_csv(f, delimiter=r" ", header=None)
df.columns=['a','b','c']
df = df.drop('b', axis=1)
local_df = []
for r in rows_2keep:
local_df.append(df[df['a'] == r].values[0])
local_df = pd.DataFrame(local_df)
local_df.columns = ['a','c']
if j == 0: mdf = local_df
mdf = mdf.merge(local_df, on='a')
df = pd.DataFrame(list(mdf['a'].values))
df['mu'] = mdf.apply(lambda x: x[1:].mean(), axis=1)
df['st'] = mdf.apply(lambda x: x[1:].std(), axis=1)
df['se'] = mdf.apply(lambda x: x[1:].sem(), axis=1)
df['dsc'] = mdf.apply(lambda x: x[1:].count(), axis=1)
print df
return
def collect_results(result):
results.extend(result)
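# main: load a pickled collection of graphs and run PGD on each of them in
# parallel through a multiprocessing pool; with --stats, only aggregate the
# previously computed .macro results and exit.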
def main(args):
from vador.graph_name import graph_name
if not args['orig']: return
if args['stats']:
graphlet_stats(args)
exit()
else:
print ("File: %s" % args['orig'][0])
with open(args['orig'][0], "rb") as f:
c = cPickle.load(f)
print " ==> loaded the pickle file"
gname = graph_name(args['orig'][0])
print " ==>", type(c), len(c), gname
if isinstance(c, dict):
if len(c.keys()) == 1:
c = c.values()[0]
else:
print c.keys()
## --
p = mp.Pool(processes=20)
for j,gnx in enumerate(c):
if isinstance (gnx, list):
gnx = gnx[0]
#Process(target=hstar_nxto_tsv, args=(gnx,gname,j, )).start()
p.apply_async(hstar_nxto_tsv, args=(gnx,gname,j,), callback=collect_results)
p.close()
p.join()
print(results)
return
if __name__ == '__main__':
''' PAMI
arguments:
'''
parser = get_parser()
args = vars(parser.parse_args())
try:
main(args)
except Exception, e:
print str(e)
traceback.print_exc()
sys.exit(1)
sys.exit(0)
| gpl-3.0 |
miltonsarria/dsp-python | audio/2_enframe.py | 1 | 2258 | import os
import matplotlib.pyplot as plt
import numpy as np
import wav_process as wp
from wav_rw import wavread
from scipy.signal import get_window
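# Frame-based spectral analysis of a wav file: read the audio, split it into
# overlapping windowed frames, display the spectrogram, show the magnitude
# spectrum of the whole signal, and plot a simple frame-energy envelope
# against the waveform.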
# change the name of the file
#archivo='sound/sines-440-602-transient.wav'
archivo='sound/speech-female.wav'
######################
# adjust the frequency range to view a specific band
rango =[0.0, 8000.0]
(fs,x)=wavread(archivo)
x=x/np.abs(x).max()
print('sampling frequency: ' + str(fs) + ', number of samples: ' + str(x.size))
long_ventana = 0.025  # window length in seconds
incremento = 0.01  # hop size in seconds
# in samples
M = int(fs * long_ventana)
H = int(fs * incremento)
# window type: window can be rectangular, hanning, hamming, blackman, blackmanharris
window = 'rectangular'
N = 2048  # if the window length is greater than 0.05 s, use 4096
(X,mX)=wp.enframe(x=x, window = window, M=M, H=H, N=N)
# plot the audio waveform
plt.subplot(2,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.title('input sound: x')
# plot the magnitude of the spectrum in decibels
plt.subplot(2,1,2)
numFrames = int(X[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq=np.linspace(0,fs/2,mX.shape[1])
bins=(binFreq>rango[0]) & (binFreq<rango[1])
Sxx=np.transpose(mX[:,bins])
binFreq=binFreq[bins]
plt.pcolormesh(frmTime,binFreq,Sxx)
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.autoscale(tight=True)
# plot the spectrum of the entire audio file without framing
M = x.size
w = get_window(window, M)
w = w / sum(w)
xw = x*w
mX,pX = wp.magFourier(xw,xw.size)
binFreq=np.linspace(0,fs/2,mX.size)
bins=(binFreq>rango[0]) & (binFreq<rango[1])
plt.figure(2)
plt.plot(binFreq[bins],mX[bins])
plt.xlabel('Frequency [Hz]')
plt.ylabel('Magnitude in dB')
e=np.zeros([X.shape[0]])
s=np.zeros([X.shape[0]])
fr=0.8
for i in np.arange(1,X.shape[0]):
e[i]=np.hstack((np.abs(X[i]),e[i-1]*fr)).max()
s[i]=(X[i]**2).sum()/X[i].size
#ei = np.interp(np.arange(x.size)/float(fs), frmTime, e)
t=np.arange(x.size)/float(fs)
plt.figure(3)
plt.plot(frmTime,s/s.max(),'g')
plt.plot(frmTime,e/e.max())
plt.plot(t,np.abs(x),'r')
plt.show()
| mit |
mfjb/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/preprocessing/data.py | 13 | 70436 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# Giorgio Patrini <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils import deprecated
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.fixes import bincount
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
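# For example (illustrative values), _handle_zeros_in_scale(np.array([0., 2.]))
# returns array([1., 2.]): zero entries (constant features) are replaced by 1.
# so that dividing by the scale never divides by zero.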
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
See also
--------
    StandardScaler: Performs scaling to unit variance using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_* instead of deprecated *data_min*.
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_* instead of deprecated *data_max*.
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_* instead of deprecated *data_range*.
See also
--------
minmax_scale: Equivalent function without the object oriented API.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
@property
@deprecated("Attribute data_range will be removed in "
"0.19. Use ``data_range_`` instead")
def data_range(self):
return self.data_range_
@property
@deprecated("Attribute data_min will be removed in "
"0.19. Use ``data_min_`` instead")
def data_min(self):
return self.data_min_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does no support sparse input. "
"You may consider to use MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X -= self.min_
X /= self.scale_
return X
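# A minimal usage sketch of MinMaxScaler (illustrative toy values):
#
#     X = np.array([[1., -1.], [2., 0.], [3., 1.]])
#     MinMaxScaler(feature_range=(0, 1)).fit_transform(X)
#     # -> [[0., 0.], [0.5, 0.5], [1., 1.]]; each column is mapped
#     #    independently onto [0, 1] using its own minimum and maximum.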
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface
to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MinMaxScaler: Performs scaling to a given range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
# To allow retro-compatibility, we handle here the case of 1D-input
# From 0.17, 1D-input are deprecated in scaler objects
# Although, we want to allow the users to keep calling this function
# with 1D-input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* is recommended instead of deprecated *std_*.
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
scale: Equivalent function without the object oriented API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
""" # noqa
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
@property
@deprecated("Attribute ``std_`` will be removed in 0.19. "
"Use ``scale_`` instead")
def std_(self):
return self.scale_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
        y : Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
        y : Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
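# A minimal usage sketch of StandardScaler (illustrative toy values):
#
#     X = np.array([[0., 0.], [0., 0.], [1., 1.], [1., 1.]])
#     StandardScaler().fit_transform(X)
#     # -> [[-1., -1.], [-1., -1.], [1., 1.], [1., 1.]]; each column has its
#     #    mean (0.5) removed and is divided by its standard deviation (0.5).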
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
maxabs_scale: Equivalent function without the object oriented API.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
        y : Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
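# A minimal usage sketch of MaxAbsScaler (illustrative toy values):
#
#     X = np.array([[1., -2.], [2., 0.], [0., 4.]])
#     MaxAbsScaler().fit_transform(X)
#     # -> [[0.5, -0.5], [1., 0.], [0., 1.]]; each column is divided by its
#     #    maximum absolute value (2. and 4.), so zeros stay zero and
#     #    sparsity is preserved.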
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MaxAbsScaler: Performs scaling to the [-1, 1] range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
# To allow retro-compatibility, we handle here the case of 1D-input
# From 0.17, 1D-input are deprecated in scaler objects
# Although, we want to allow the users to keep calling this function
# with 1D-input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the quantile range (defaults to IQR: Interquartile Range).
The IQR is the range between the 1st quartile (25th quantile)
and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
robust_scale: Equivalent function without the object oriented API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with
'whiten=True'.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
https://en.wikipedia.org/wiki/Median_(statistics)
https://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.quantile_range = quantile_range
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q_min, q_max = self.quantile_range
if not 0 <= q_min <= q_max <= 100:
raise ValueError("Invalid quantile range: %s" %
str(self.quantile_range))
q = np.percentile(X, self.quantile_range, axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
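# A minimal usage sketch of RobustScaler (illustrative toy values):
#
#     X = np.array([[1.], [2.], [3.], [4.], [100.]])
#     RobustScaler().fit_transform(X)
#     # -> [[-1.], [-0.5], [0.], [0.5], [48.5]]; the column is centered on its
#     #    median (3.) and divided by its interquartile range (4. - 2. = 2.),
#     #    so the outlier barely affects how the other samples are scaled.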
def robust_scale(X, axis=0, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to the quantile range given by
``quantile_range`` (the interquartile range by default).
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quartile, 3rd quartile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
set to False to perform inplace scaling and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only quantile-range scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if the materialized dense array is expected to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
RobustScaler: Performs centering and scaling using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
quantile_range=quantile_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
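# The functional form mirrors the estimator above; a short sketch with assumed
# toy data:
#
#   >>> X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 1000.0]])
#   >>> X_cols = robust_scale(X, axis=0)   # scale each feature (column) independently
#   >>> X_rows = robust_scale(X, axis=1)   # scale each sample (row) instead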
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(bincount(c, minlength=self.n_input_features_)
for c in combinations)
def get_feature_names(self, input_features=None):
"""
Return feature names for output features
Parameters
----------
input_features : list of string, length n_features, optional
String names for input features if available. By default,
"x0", "x1", ... "xn_features" is used.
Returns
-------
output_feature_names : list of string, length n_output_features
"""
powers = self.powers_
if input_features is None:
input_features = ['x%d' % i for i in range(powers.shape[1])]
feature_names = []
for row in powers:
inds = np.where(row)[0]
if len(inds):
name = " ".join("%s^%d" % (input_features[ind], exp)
if exp != 1 else input_features[ind]
for ind, exp in zip(inds, row[inds]))
else:
name = "1"
feature_names.append(name)
return feature_names
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
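# A sketch of the feature-name helper defined above, reusing the toy matrix
# from the class docstring:
#
#   >>> poly = PolynomialFeatures(degree=2).fit(np.arange(6).reshape(3, 2))
#   >>> poly.get_feature_names()
#   ['1', 'x0', 'x1', 'x0^2', 'x0 x1', 'x1^2']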
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
See also
--------
Normalizer: Performs normalization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
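# A minimal sketch of the function above (toy values assumed for illustration):
#
#   >>> X = np.array([[3.0, 4.0], [1.0, 0.0]])
#   >>> X_l2 = normalize(X)               # rows become [0.6, 0.8] and [1.0, 0.0]
#   >>> X_l1 = normalize(X, norm='l1')    # rows sum to one in absolute value
#   >>> X_col = normalize(X, axis=0)      # normalize each feature (column) instead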
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering, for instance. The dot product of two
l2-normalized TF-IDF vectors is the cosine similarity of the vectors
and is the base similarity metric for the Vector Space Model commonly
used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
normalize: Equivalent function without the object oriented API.
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
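# The estimator above is stateless, so it is typically dropped into a Pipeline;
# a sketch assuming a hypothetical downstream classifier:
#
#   >>> from sklearn.pipeline import make_pipeline
#   >>> from sklearn.linear_model import SGDClassifier
#   >>> clf = make_pipeline(Normalizer(norm='l2'), SGDClassifier())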
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix).
See also
--------
Binarizer: Performs binarization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
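# A short sketch of the thresholding above (toy values assumed):
#
#   >>> X = np.array([[0.5, -1.0], [2.0, 0.0]])
#   >>> binarize(X, threshold=0.0)        # -> [[1., 0.], [1., 0.]]
#   >>> binarize(X, threshold=1.0)        # only the value 2.0 maps to 1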
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
binarize: Equivalent function without the object oriented API.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers the data
(i.e., normalizes it to have zero mean in that feature space) without
explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
@property
def _pairwise(self):
return True
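# A sketch of centering a precomputed kernel; the RBF kernel and random data
# below are assumptions chosen only for illustration:
#
#   >>> from sklearn.metrics.pairwise import rbf_kernel
#   >>> X = np.random.RandomState(0).randn(5, 3)
#   >>> K = rbf_kernel(X)
#   >>> K_centered = KernelCenterer().fit(K).transform(K)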
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected : "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Note: a one-hot encoding of y labels should use a LabelBinarizer
instead.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be
in ``range(n_values[i])``
categorical_features : "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all
fashion.
sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of
iterables and a multilabel format, e.g. a (samples x classes) binary
matrix indicating the presence of a class label.
sklearn.preprocessing.LabelEncoder : encodes labels with values between 0
and n_classes-1.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known using fit.
# i.e. those less than n_values_, selected via the mask below.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either 'error' or "
"'ignore', got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
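# A sketch of the ``handle_unknown`` behaviour described above (toy categories
# assumed for illustration):
#
#   >>> enc = OneHotEncoder(handle_unknown='ignore').fit([[0, 1], [1, 2]])
#   >>> encoded = enc.transform([[1, 3]])   # the unseen category 3 is silently dropped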
| mit |
marionleborgne/nupic | examples/opf/clients/hotgym/prediction/one_gym/nupic_output.py | 17 | 6193 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Provides two classes with the same signature for writing data out of NuPIC
models.
(This is a component of the One Hot Gym Prediction Tutorial.)
"""
import csv
from collections import deque
from abc import ABCMeta, abstractmethod
# Try to import matplotlib, but we don't have to.
try:
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.dates import date2num
except ImportError:
pass
WINDOW = 100
class NuPICOutput(object):
__metaclass__ = ABCMeta
def __init__(self, names, showAnomalyScore=False):
self.names = names
self.showAnomalyScore = showAnomalyScore
@abstractmethod
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
pass
@abstractmethod
def close(self):
pass
class NuPICFileOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICFileOutput, self).__init__(*args, **kwargs)
self.outputFiles = []
self.outputWriters = []
self.lineCounts = []
headerRow = ['timestamp', 'kw_energy_consumption', 'prediction']
for name in self.names:
self.lineCounts.append(0)
outputFileName = "%s_out.csv" % name
print "Preparing to output %s data to %s" % (name, outputFileName)
outputFile = open(outputFileName, "w")
self.outputFiles.append(outputFile)
outputWriter = csv.writer(outputFile)
self.outputWriters.append(outputWriter)
outputWriter.writerow(headerRow)
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
for index in range(len(self.names)):
timestamp = timestamps[index]
actual = actualValues[index]
prediction = predictedValues[index]
writer = self.outputWriters[index]
if timestamp is not None:
outputRow = [timestamp, actual, prediction]
writer.writerow(outputRow)
self.lineCounts[index] += 1
def close(self):
for index, name in enumerate(self.names):
self.outputFiles[index].close()
print "Done. Wrote %i data lines to %s." % (self.lineCounts[index], name)
class NuPICPlotOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICPlotOutput, self).__init__(*args, **kwargs)
# Turn matplotlib interactive mode on.
plt.ion()
self.dates = []
self.convertedDates = []
self.actualValues = []
self.predictedValues = []
self.actualLines = []
self.predictedLines = []
self.linesInitialized = False
self.graphs = []
plotCount = len(self.names)
plotHeight = max(plotCount * 3, 6)
fig = plt.figure(figsize=(14, plotHeight))
gs = gridspec.GridSpec(plotCount, 1)
for index in range(len(self.names)):
self.graphs.append(fig.add_subplot(gs[index, 0]))
plt.title(self.names[index])
plt.ylabel('KW Energy Consumption')
plt.xlabel('Date')
plt.tight_layout()
def initializeLines(self, timestamps):
for index in range(len(self.names)):
print "initializing %s" % self.names[index]
# graph = self.graphs[index]
self.dates.append(deque([timestamps[index]] * WINDOW, maxlen=WINDOW))
self.convertedDates.append(deque(
[date2num(date) for date in self.dates[index]], maxlen=WINDOW
))
self.actualValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
self.predictedValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
actualPlot, = self.graphs[index].plot(
self.dates[index], self.actualValues[index]
)
self.actualLines.append(actualPlot)
predictedPlot, = self.graphs[index].plot(
self.dates[index], self.predictedValues[index]
)
self.predictedLines.append(predictedPlot)
self.linesInitialized = True
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
# We need the first timestamp to initialize the lines at the right X value,
# so do that check first.
if not self.linesInitialized:
self.initializeLines(timestamps)
for index in range(len(self.names)):
self.dates[index].append(timestamps[index])
self.convertedDates[index].append(date2num(timestamps[index]))
self.actualValues[index].append(actualValues[index])
self.predictedValues[index].append(predictedValues[index])
# Update data
self.actualLines[index].set_xdata(self.convertedDates[index])
self.actualLines[index].set_ydata(self.actualValues[index])
self.predictedLines[index].set_xdata(self.convertedDates[index])
self.predictedLines[index].set_ydata(self.predictedValues[index])
self.graphs[index].relim()
self.graphs[index].autoscale_view(True, True, True)
plt.draw()
plt.legend(('actual','predicted'), loc=3)
def refreshGUI(self):
"""Give plot a pause, so data is drawn and GUI's event loop can run.
"""
plt.pause(0.0001)
def close(self):
plt.ioff()
plt.show()
NuPICOutput.register(NuPICFileOutput)
NuPICOutput.register(NuPICPlotOutput)
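# A sketch of driving the file writer above from a prediction loop; the model
# name, timestamp and values below are hypothetical and not defined here:
#
#   >>> output = NuPICFileOutput(["rec-center-hourly"])
#   >>> output.write([timestamp], [actualValue], [predictedValue], predictionStep=1)
#   >>> output.close()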
| agpl-3.0 |
18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/matplotlib/backends/backend_mixed.py | 11 | 5673 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib._image import frombuffer
from matplotlib.backends.backend_agg import RendererAgg
from matplotlib.tight_bbox import process_figure_for_rasterizing
class MixedModeRenderer(object):
"""
A helper class to implement a renderer that switches between
vector and raster drawing. An example may be a PDF writer, where
most things are drawn with PDF vector commands, but some very
complex objects, such as quad meshes, are rasterised and then
output as images.
"""
def __init__(self, figure, width, height, dpi, vector_renderer,
raster_renderer_class=None,
bbox_inches_restore=None):
"""
figure: The figure instance.
width: The width of the canvas in logical units
height: The height of the canvas in logical units
dpi: The dpi of the canvas
vector_renderer: An instance of a subclass of RendererBase
that will be used for the vector drawing.
raster_renderer_class: The renderer class to use for the
raster drawing. If not provided, this will use the Agg
backend (which is currently the only viable option anyway.)
"""
if raster_renderer_class is None:
raster_renderer_class = RendererAgg
self._raster_renderer_class = raster_renderer_class
self._width = width
self._height = height
self.dpi = dpi
assert not vector_renderer.option_image_nocomposite()
self._vector_renderer = vector_renderer
self._raster_renderer = None
self._rasterizing = 0
# A reference to the figure is needed as we need to change
# the figure dpi before and after the rasterization. Although
# this looks ugly, I couldn't find a better solution. -JJL
self.figure = figure
self._figdpi = figure.get_dpi()
self._bbox_inches_restore = bbox_inches_restore
self._set_current_renderer(vector_renderer)
_methods = """
close_group draw_image draw_markers draw_path
draw_path_collection draw_quad_mesh draw_tex draw_text
finalize flipy get_canvas_width_height get_image_magnification
get_texmanager get_text_width_height_descent new_gc open_group
option_image_nocomposite points_to_pixels strip_math
start_filter stop_filter draw_gouraud_triangle
draw_gouraud_triangles option_scale_image
_text2path _get_text_path_transform height width
""".split()
def _set_current_renderer(self, renderer):
self._renderer = renderer
for method in self._methods:
if hasattr(renderer, method):
setattr(self, method, getattr(renderer, method))
renderer.start_rasterizing = self.start_rasterizing
renderer.stop_rasterizing = self.stop_rasterizing
def start_rasterizing(self):
"""
Enter "raster" mode. All subsequent drawing commands (until
stop_rasterizing is called) will be drawn with the raster
backend.
If start_rasterizing is called multiple times before
stop_rasterizing is called, this method has no effect.
"""
# change the dpi of the figure temporarily.
self.figure.set_dpi(self.dpi)
if self._bbox_inches_restore: # when tight bbox is used
r = process_figure_for_rasterizing(self.figure,
self._bbox_inches_restore)
self._bbox_inches_restore = r
if self._rasterizing == 0:
self._raster_renderer = self._raster_renderer_class(
self._width*self.dpi, self._height*self.dpi, self.dpi)
self._set_current_renderer(self._raster_renderer)
self._rasterizing += 1
def stop_rasterizing(self):
"""
Exit "raster" mode. All of the drawing that was done since
the last start_rasterizing command will be copied to the
vector backend by calling draw_image.
If stop_rasterizing is called multiple times before
start_rasterizing is called, this method has no effect.
"""
self._rasterizing -= 1
if self._rasterizing == 0:
self._set_current_renderer(self._vector_renderer)
width, height = self._width * self.dpi, self._height * self.dpi
buffer, bounds = self._raster_renderer.tostring_rgba_minimized()
l, b, w, h = bounds
if w > 0 and h > 0:
image = frombuffer(buffer, w, h, True)
image.is_grayscale = False
image.flipud_out()
gc = self._renderer.new_gc()
# TODO: If the mixedmode resolution differs from the figure's
# dpi, the image must be scaled (dpi->_figdpi). Not all
# backends support this.
self._renderer.draw_image(
gc,
float(l) / self.dpi * self._figdpi,
(float(height)-b-h) / self.dpi * self._figdpi,
image)
self._raster_renderer = None
self._rasterizing = False
# restore the figure dpi.
self.figure.set_dpi(self._figdpi)
if self._bbox_inches_restore: # when tight bbox is used
r = process_figure_for_rasterizing(self.figure,
self._bbox_inches_restore,
self._figdpi)
self._bbox_inches_restore = r
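# A sketch of how a vector backend might drive the renderer above; ``figure``
# and ``vector_renderer`` are assumed to be created by that backend and are
# not defined in this module:
#
#   renderer = MixedModeRenderer(figure, width, height, dpi, vector_renderer)
#   renderer.start_rasterizing()
#   # ... draw complex artists (e.g. a quad mesh) via the Agg renderer ...
#   renderer.stop_rasterizing()   # composites the raster patch into the vector output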
| mit |
jakevdp/seaborn | seaborn/distributions.py | 1 | 20383 | """Plotting functions for visualizing distributions."""
from __future__ import division
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
try:
import statsmodels.nonparametric.api as smnp
_has_statsmodels = True
except ImportError:
_has_statsmodels = False
from .utils import set_hls_values, iqr, _kde_support
from .palettes import color_palette, blend_palette
from .axisgrid import JointGrid
def _freedman_diaconis_bins(a):
"""Calculate number of hist bins using Freedman-Diaconis rule."""
# From http://stats.stackexchange.com/questions/798/
a = np.asarray(a)
h = 2 * iqr(a) / (len(a) ** (1 / 3))
# fall back to 10 bins if iqr is 0
if h == 0:
return 10.
else:
return np.ceil((a.max() - a.min()) / h)
def distplot(a, bins=None, hist=True, kde=True, rug=False, fit=None,
hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None,
color=None, vertical=False, norm_hist=False, axlabel=None,
label=None, ax=None):
"""Flexibly plot a distribution of observations.
Parameters
----------
a : (squeezable to) 1d array
Observed data.
bins : argument for matplotlib hist(), or None, optional
Specification of hist bins, or None to use Freedman-Diaconis rule.
hist : bool, optional
Whether to plot a (normed) histogram.
kde : bool, optional
Whether to plot a gaussian kernel density estimate.
rug : bool, optional
Whether to draw a rugplot on the support axis.
fit : random variable object, optional
An object with a `fit` method, returning a tuple that can be passed to
a `pdf` method as positional arguments following a grid of values to
evaluate the pdf on.
{hist, kde, rug, fit}_kws : dictionaries, optional
Keyword arguments for underlying plotting functions.
color : matplotlib color, optional
Color to plot everything but the fitted curve in.
vertical : bool, optional
If True, observed values are on the y-axis.
norm_hist : bool, optional
If True, the histogram height shows a density rather than a count.
This is implied if a KDE or fitted density is plotted.
axlabel : string, False, or None, optional
Name for the support axis label. If None, will try to get it
from a.name; if False, do not set a label.
label : string, optional
Legend label for the relevant component of the plot.
ax : matplotlib axis, optional
if provided, plot on this axis
Returns
-------
ax : matplotlib axis
"""
if ax is None:
ax = plt.gca()
# Intelligently label the support axis
label_ax = bool(axlabel)
if axlabel is None and hasattr(a, "name"):
axlabel = a.name
if axlabel is not None:
label_ax = True
# Make `a` a 1-d array
a = np.asarray(a).squeeze()
# Decide if the hist is normed
norm_hist = norm_hist or kde or (fit is not None)
# Handle dictionary defaults
if hist_kws is None:
hist_kws = dict()
if kde_kws is None:
kde_kws = dict()
if rug_kws is None:
rug_kws = dict()
if fit_kws is None:
fit_kws = dict()
# Get the color from the current color cycle
if color is None:
if vertical:
line, = ax.plot(0, a.mean())
else:
line, = ax.plot(a.mean(), 0)
color = line.get_color()
line.remove()
# Plug the label into the right kwarg dictionary
if label is not None:
if hist:
hist_kws["label"] = label
elif kde:
kde_kws["label"] = label
elif rug:
rug_kws["label"] = label
elif fit:
fit_kws["label"] = label
if hist:
if bins is None:
bins = min(_freedman_diaconis_bins(a), 50)
hist_kws.setdefault("alpha", 0.4)
hist_kws.setdefault("normed", norm_hist)
orientation = "horizontal" if vertical else "vertical"
hist_color = hist_kws.pop("color", color)
ax.hist(a, bins, orientation=orientation,
color=hist_color, **hist_kws)
if hist_color != color:
hist_kws["color"] = hist_color
if kde:
kde_color = kde_kws.pop("color", color)
kdeplot(a, vertical=vertical, ax=ax, color=kde_color, **kde_kws)
if kde_color != color:
kde_kws["color"] = kde_color
if rug:
rug_color = rug_kws.pop("color", color)
axis = "y" if vertical else "x"
rugplot(a, axis=axis, ax=ax, color=rug_color, **rug_kws)
if rug_color != color:
rug_kws["color"] = rug_color
if fit is not None:
fit_color = fit_kws.pop("color", "#282828")
gridsize = fit_kws.pop("gridsize", 200)
cut = fit_kws.pop("cut", 3)
clip = fit_kws.pop("clip", (-np.inf, np.inf))
bw = stats.gaussian_kde(a).scotts_factor() * a.std(ddof=1)
x = _kde_support(a, bw, gridsize, cut, clip)
params = fit.fit(a)
pdf = lambda x: fit.pdf(x, *params)
y = pdf(x)
if vertical:
x, y = y, x
ax.plot(x, y, color=fit_color, **fit_kws)
if fit_color != "#282828":
fit_kws["color"] = fit_color
if label_ax:
if vertical:
ax.set_ylabel(axlabel)
else:
ax.set_xlabel(axlabel)
return ax
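# A minimal sketch of the public entry point above, assuming synthetic data:
#
#   >>> x = np.random.RandomState(0).normal(size=200)
#   >>> ax = distplot(x, rug=True, fit=stats.norm)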
def _univariate_kdeplot(data, shade, vertical, kernel, bw, gridsize, cut,
clip, legend, ax, cumulative=False, **kwargs):
"""Plot a univariate kernel density estimate on one of the axes."""
# Sort out the clipping
if clip is None:
clip = (-np.inf, np.inf)
# Calculate the KDE
if _has_statsmodels:
# Prefer using statsmodels for kernel flexibility
x, y = _statsmodels_univariate_kde(data, kernel, bw,
gridsize, cut, clip,
cumulative=cumulative)
else:
# Fall back to scipy if missing statsmodels
if kernel != "gau":
kernel = "gau"
msg = "Kernel other than `gau` requires statsmodels."
warnings.warn(msg, UserWarning)
if cumulative:
raise ImportError("Cumulative distributions are currently "
"only implemented in statsmodels. "
"Please install statsmodels.")
x, y = _scipy_univariate_kde(data, bw, gridsize, cut, clip)
# Make sure the density is nonnegative
y = np.amax(np.c_[np.zeros_like(y), y], axis=1)
# Flip the data if the plot should be on the y axis
if vertical:
x, y = y, x
# Check if a label was specified in the call
label = kwargs.pop("label", None)
# Otherwise check if the data object has a name
if label is None and hasattr(data, "name"):
label = data.name
# Decide if we're going to add a legend
legend = label is not None and legend
label = "_nolegend_" if label is None else label
# Use the active color cycle to find the plot color
line, = ax.plot(x, y, **kwargs)
color = line.get_color()
line.remove()
kwargs.pop("color", None)
# Draw the KDE plot and, optionally, shade
ax.plot(x, y, color=color, label=label, **kwargs)
alpha = kwargs.get("alpha", 0.25)
if shade:
if vertical:
ax.fill_betweenx(y, 1e-12, x, color=color, alpha=alpha)
else:
ax.fill_between(x, 1e-12, y, color=color, alpha=alpha)
# Draw the legend here
if legend:
ax.legend(loc="best")
return ax
def _statsmodels_univariate_kde(data, kernel, bw, gridsize, cut, clip,
cumulative=False):
"""Compute a univariate kernel density estimate using statsmodels."""
fft = kernel == "gau"
kde = smnp.KDEUnivariate(data)
kde.fit(kernel, bw, fft, gridsize=gridsize, cut=cut, clip=clip)
if cumulative:
grid, y = kde.support, kde.cdf
else:
grid, y = kde.support, kde.density
return grid, y
def _scipy_univariate_kde(data, bw, gridsize, cut, clip):
"""Compute a univariate kernel density estimate using scipy."""
try:
kde = stats.gaussian_kde(data, bw_method=bw)
except TypeError:
kde = stats.gaussian_kde(data)
if bw != "scott": # scipy default
msg = ("Ignoring bandwidth choice, "
"please upgrade scipy to use a different bandwidth.")
warnings.warn(msg, UserWarning)
if isinstance(bw, str):
bw = "scotts" if bw == "scott" else bw
bw = getattr(kde, "%s_factor" % bw)()
grid = _kde_support(data, bw, gridsize, cut, clip)
y = kde(grid)
return grid, y
def _bivariate_kdeplot(x, y, filled, kernel, bw, gridsize, cut, clip, axlabel,
ax, **kwargs):
"""Plot a joint KDE estimate as a bivariate contour plot."""
# Determine the clipping
if clip is None:
clip = [(-np.inf, np.inf), (-np.inf, np.inf)]
elif np.ndim(clip) == 1:
clip = [clip, clip]
# Calculate the KDE
if _has_statsmodels:
xx, yy, z = _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip)
else:
xx, yy, z = _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip)
# Plot the contours
n_levels = kwargs.pop("n_levels", 10)
cmap = kwargs.get("cmap", "BuGn" if filled else "BuGn_d")
if isinstance(cmap, str):
if cmap.endswith("_d"):
pal = ["#333333"]
pal.extend(color_palette(cmap.replace("_d", "_r"), 2))
cmap = blend_palette(pal, as_cmap=True)
kwargs["cmap"] = cmap
contour_func = ax.contourf if filled else ax.contour
contour_func(xx, yy, z, n_levels, **kwargs)
kwargs["n_levels"] = n_levels
# Label the axes
if hasattr(x, "name") and axlabel:
ax.set_xlabel(x.name)
if hasattr(y, "name") and axlabel:
ax.set_ylabel(y.name)
return ax
def _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip):
"""Compute a bivariate kde using statsmodels."""
if isinstance(bw, str):
bw_func = getattr(smnp.bandwidths, "bw_" + bw)
x_bw = bw_func(x)
y_bw = bw_func(y)
bw = [x_bw, y_bw]
elif np.isscalar(bw):
bw = [bw, bw]
if isinstance(x, pd.Series):
x = x.values
if isinstance(y, pd.Series):
y = y.values
kde = smnp.KDEMultivariate([x, y], "cc", bw)
x_support = _kde_support(x, kde.bw[0], gridsize, cut, clip[0])
y_support = _kde_support(y, kde.bw[1], gridsize, cut, clip[1])
xx, yy = np.meshgrid(x_support, y_support)
z = kde.pdf([xx.ravel(), yy.ravel()]).reshape(xx.shape)
return xx, yy, z
def _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip):
"""Compute a bivariate kde using scipy."""
data = np.c_[x, y]
kde = stats.gaussian_kde(data.T)
data_std = data.std(axis=0, ddof=1)
if isinstance(bw, str):
bw = "scotts" if bw == "scott" else bw
bw_x = getattr(kde, "%s_factor" % bw)() * data_std[0]
bw_y = getattr(kde, "%s_factor" % bw)() * data_std[1]
elif np.isscalar(bw):
bw_x, bw_y = bw, bw
else:
msg = ("Cannot specify a different bandwidth for each dimension "
"with the scipy backend. You should install statsmodels.")
raise ValueError(msg)
x_support = _kde_support(data[:, 0], bw_x, gridsize, cut, clip[0])
y_support = _kde_support(data[:, 1], bw_y, gridsize, cut, clip[1])
xx, yy = np.meshgrid(x_support, y_support)
z = kde([xx.ravel(), yy.ravel()]).reshape(xx.shape)
return xx, yy, z
def kdeplot(data, data2=None, shade=False, vertical=False, kernel="gau",
bw="scott", gridsize=100, cut=3, clip=None, legend=True, ax=None,
cumulative=False, **kwargs):
"""Fit and plot a univariate or bivarate kernel density estimate.
Parameters
----------
data : 1d or 2d array-like
Input data. If two-dimensional, assumed to be shaped (n_unit x n_var),
and a bivariate contour plot will be drawn.
data2: 1d array-like
Second input data. If provided `data` must be one-dimensional, and
a bivariate plot is produced.
shade : bool, optional
If true, shade in the area under the KDE curve (or draw with filled
contours when data is bivariate).
vertical : bool
If True, density is on x-axis.
kernel : {'gau' | 'cos' | 'biw' | 'epa' | 'tri' | 'triw' }, optional
Code for shape of kernel to fit with. Bivariate KDE can only use
gaussian kernel.
bw : {'scott' | 'silverman' | scalar | pair of scalars }, optional
Name of reference method to determine kernel size, scalar factor,
or scalar for each dimension of the bivariate plot.
gridsize : int, optional
Number of discrete points in the evaluation grid.
cut : scalar, optional
Draw the estimate to cut * bw from the extreme data points.
clip : pair of scalars, or pair of pair of scalars, optional
Lower and upper bounds for datapoints used to fit KDE. Can provide
a pair of (low, high) bounds for bivariate plots.
legend : bool, optional
If True, add a legend or label the axes when possible.
ax : matplotlib axis, optional
Axis to plot on, otherwise uses current axis.
cumulative : bool
If True, draw the cumulative distribution estimated by the kde.
kwargs : other keyword arguments for plot()
Returns
-------
ax : matplotlib axis
Axis with plot.
"""
if ax is None:
ax = plt.gca()
data = data.astype(np.float64)
if data2 is not None:
data2 = data2.astype(np.float64)
bivariate = False
if isinstance(data, np.ndarray) and np.ndim(data) > 1:
bivariate = True
x, y = data.T
elif isinstance(data, pd.DataFrame) and np.ndim(data) > 1:
bivariate = True
x = data.iloc[:, 0].values
y = data.iloc[:, 1].values
elif data2 is not None:
bivariate = True
x = data
y = data2
if bivariate and cumulative:
raise TypeError("Cumulative distribution plots are not "
"supported for bivariate distributions.")
if bivariate:
ax = _bivariate_kdeplot(x, y, shade, kernel, bw, gridsize,
cut, clip, legend, ax, **kwargs)
else:
ax = _univariate_kdeplot(data, shade, vertical, kernel, bw,
gridsize, cut, clip, legend, ax,
cumulative=cumulative, **kwargs)
return ax
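# A short sketch of the univariate and bivariate paths above (synthetic data
# assumed):
#
#   >>> rs = np.random.RandomState(0)
#   >>> ax = kdeplot(rs.normal(size=100), shade=True)
#   >>> data = rs.multivariate_normal([0, 0], [[1, .5], [.5, 1]], 200)
#   >>> ax = kdeplot(data[:, 0], data[:, 1], shade=True)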
def rugplot(a, height=None, axis="x", ax=None, **kwargs):
"""Plot datapoints in an array as sticks on an axis.
Parameters
----------
a : vector
1D array of datapoints.
height : scalar, optional
Height of ticks, if None draw at 5% of axis range.
axis : {'x' | 'y'}, optional
Axis to draw rugplot on.
ax : matplotlib axis
Axis to draw plot into; otherwise grabs current axis.
kwargs : other keyword arguments for plt.plot()
Returns
-------
ax : matplotlib axis
Axis with rugplot.
"""
if ax is None:
ax = plt.gca()
a = np.asarray(a)
vertical = kwargs.pop("vertical", None)
if vertical is not None:
axis = "y" if vertical else "x"
other_axis = dict(x="y", y="x")[axis]
min, max = getattr(ax, "get_%slim" % other_axis)()
if height is None:
range = max - min
height = range * .05
if axis == "x":
ax.plot([a, a], [min, min + height], **kwargs)
else:
ax.plot([min, min + height], [a, a], **kwargs)
return ax
def jointplot(x, y, data=None, kind="scatter", stat_func=stats.pearsonr,
color=None, size=6, ratio=5, space=.2,
dropna=True, xlim=None, ylim=None,
joint_kws=None, marginal_kws=None, annot_kws=None):
"""Draw a plot of two variables with bivariate and univariate graphs.
Parameters
----------
x, y : strings or vectors
Data or names of variables in `data`.
data : DataFrame, optional
DataFrame when `x` and `y` are variable names.
kind : { "scatter" | "reg" | "resid" | "kde" | "hex" }, optional
Kind of plot to draw.
stat_func : callable or None
Function used to calculate a statistic about the relationship and
annotate the plot. Should map `x` and `y` either to a single value
or to a (value, p) tuple. Set to ``None`` if you don't want to
annotate the plot.
color : matplotlib color, optional
Color used for the plot elements.
size : numeric, optional
Size of the figure (it will be square).
ratio : numeric, optional
Ratio of joint axes size to marginal axes height.
space : numeric, optional
Space between the joint and marginal axes
dropna : bool, optional
If True, remove observations that are missing from `x` and `y`.
{x, y}lim : two-tuples, optional
Axis limits to set before plotting.
{joint, marginal, annot}_kws : dicts
Additional keyword arguments for the plot components.
Returns
-------
grid : JointGrid
JointGrid object with the plot on it.
See Also
--------
JointGrid : The Grid class used for drawing this plot. Use it directly if
you need more flexibility.
"""
# Set up empty default kwarg dicts
if joint_kws is None:
joint_kws = {}
if marginal_kws is None:
marginal_kws = {}
if annot_kws is None:
annot_kws = {}
# Make a colormap based off the plot color
if color is None:
color = color_palette()[0]
color_rgb = mpl.colors.colorConverter.to_rgb(color)
colors = [set_hls_values(color_rgb, l=l) for l in np.linspace(1, 0, 12)]
cmap = blend_palette(colors, as_cmap=True)
# Initialize the JointGrid object
grid = JointGrid(x, y, data, dropna=dropna,
size=size, ratio=ratio, space=space,
xlim=xlim, ylim=ylim)
# Plot the data using the grid
if kind == "scatter":
joint_kws.setdefault("color", color)
grid.plot_joint(plt.scatter, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
grid.plot_marginals(distplot, **marginal_kws)
elif kind.startswith("hex"):
x_bins = _freedman_diaconis_bins(grid.x)
y_bins = _freedman_diaconis_bins(grid.y)
gridsize = int(np.mean([x_bins, y_bins]))
joint_kws.setdefault("gridsize", gridsize)
joint_kws.setdefault("cmap", cmap)
grid.plot_joint(plt.hexbin, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
grid.plot_marginals(distplot, **marginal_kws)
elif kind.startswith("kde"):
joint_kws.setdefault("shade", True)
joint_kws.setdefault("cmap", cmap)
grid.plot_joint(kdeplot, **joint_kws)
marginal_kws.setdefault("shade", True)
marginal_kws.setdefault("color", color)
grid.plot_marginals(kdeplot, **marginal_kws)
elif kind.startswith("reg"):
from .linearmodels import regplot
marginal_kws.setdefault("color", color)
grid.plot_marginals(distplot, **marginal_kws)
joint_kws.setdefault("color", color)
grid.plot_joint(regplot, **joint_kws)
elif kind.startswith("resid"):
from .linearmodels import residplot
joint_kws.setdefault("color", color)
grid.plot_joint(residplot, **joint_kws)
x, y = grid.ax_joint.collections[0].get_offsets().T
marginal_kws.setdefault("color", color)
marginal_kws.setdefault("kde", False)
distplot(x, ax=grid.ax_marg_x, **marginal_kws)
distplot(y, vertical=True, fit=stats.norm, ax=grid.ax_marg_y,
**marginal_kws)
stat_func = None
else:
msg = "kind must be either 'scatter', 'reg', 'resid', 'kde', or 'hex'"
raise ValueError(msg)
if stat_func is not None:
grid.annotate(stat_func, **annot_kws)
return grid
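# A usage sketch with an assumed toy DataFrame:
#
#   >>> df = pd.DataFrame(np.random.RandomState(0).randn(200, 2),
#   ...                   columns=["x", "y"])
#   >>> g = jointplot("x", "y", data=df, kind="kde")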
| bsd-3-clause |
Winand/pandas | pandas/core/computation/scope.py | 9 | 9228 | """
Module for scope operations
"""
import sys
import struct
import inspect
import datetime
import itertools
import pprint
import numpy as np
import pandas
import pandas as pd # noqa
from pandas.compat import DeepChainMap, map, StringIO
from pandas.core.base import StringMixin
import pandas.core.computation as compu
def _ensure_scope(level, global_dict=None, local_dict=None, resolvers=(),
target=None, **kwargs):
"""Ensure that we are grabbing the correct scope."""
return Scope(level + 1, global_dict=global_dict, local_dict=local_dict,
resolvers=resolvers, target=target)
def _replacer(x):
"""Replace a number with its hexadecimal representation. Used to tag
temporary variables with their calling scope's id.
"""
# get the hex repr of the binary char and remove 0x and pad by pad_size
# zeros
try:
hexin = ord(x)
except TypeError:
# bytes literals masquerade as ints when iterating in py3
hexin = x
return hex(hexin)
def _raw_hex_id(obj):
"""Return the padded hexadecimal id of ``obj``."""
# interpret as a pointer since that's what really what id returns
packed = struct.pack('@P', id(obj))
return ''.join(map(_replacer, packed))
_DEFAULT_GLOBALS = {
'Timestamp': pandas._libs.lib.Timestamp,
'datetime': datetime.datetime,
'True': True,
'False': False,
'list': list,
'tuple': tuple,
'inf': np.inf,
'Inf': np.inf,
}
def _get_pretty_string(obj):
"""Return a prettier version of obj
Parameters
----------
obj : object
Object to pretty print
Returns
-------
s : str
Pretty print object repr
"""
sio = StringIO()
pprint.pprint(obj, stream=sio)
return sio.getvalue()
class Scope(StringMixin):
"""Object to hold scope, with a few bells to deal with some custom syntax
and contexts added by pandas.
Parameters
----------
level : int
global_dict : dict or None, optional, default None
local_dict : dict or Scope or None, optional, default None
resolvers : list-like or None, optional, default None
target : object
Attributes
----------
level : int
scope : DeepChainMap
target : object
temps : dict
"""
__slots__ = 'level', 'scope', 'target', 'temps'
def __init__(self, level, global_dict=None, local_dict=None, resolvers=(),
target=None):
self.level = level + 1
# shallow copy because we don't want to keep filling this up with what
# was there before if there are multiple calls to Scope/_ensure_scope
self.scope = DeepChainMap(_DEFAULT_GLOBALS.copy())
self.target = target
if isinstance(local_dict, Scope):
self.scope.update(local_dict.scope)
if local_dict.target is not None:
self.target = local_dict.target
self.update(local_dict.level)
frame = sys._getframe(self.level)
try:
# shallow copy here because we don't want to replace what's in
# scope when we align terms (alignment accesses the underlying
# numpy array of pandas objects)
self.scope = self.scope.new_child((global_dict or
frame.f_globals).copy())
if not isinstance(local_dict, Scope):
self.scope = self.scope.new_child((local_dict or
frame.f_locals).copy())
finally:
del frame
# assumes that resolvers are going from outermost scope to inner
if isinstance(local_dict, Scope):
resolvers += tuple(local_dict.resolvers.maps)
self.resolvers = DeepChainMap(*resolvers)
self.temps = {}
def __unicode__(self):
scope_keys = _get_pretty_string(list(self.scope.keys()))
res_keys = _get_pretty_string(list(self.resolvers.keys()))
unicode_str = '{name}(scope={scope_keys}, resolvers={res_keys})'
return unicode_str.format(name=type(self).__name__,
scope_keys=scope_keys,
res_keys=res_keys)
@property
def has_resolvers(self):
"""Return whether we have any extra scope.
For example, DataFrames pass their columns as resolvers during calls to
``DataFrame.eval()`` and ``DataFrame.query()``.
Returns
-------
hr : bool
"""
return bool(len(self.resolvers))
def resolve(self, key, is_local):
"""Resolve a variable name in a possibly local context
Parameters
----------
key : text_type
A variable name
is_local : bool
Flag indicating whether the variable is local or not (prefixed with
the '@' symbol)
Returns
-------
value : object
The value of a particular variable
"""
try:
# only look for locals in outer scope
if is_local:
return self.scope[key]
# not a local variable so check in resolvers if we have them
if self.has_resolvers:
return self.resolvers[key]
# if we're here that means that we have no locals and we also have
# no resolvers
assert not is_local and not self.has_resolvers
return self.scope[key]
except KeyError:
try:
# as a last-ditch effort, look in temporaries
# these are created when parsing indexing expressions
# e.g., df[df > 0]
return self.temps[key]
except KeyError:
raise compu.ops.UndefinedVariableError(key, is_local)
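# Illustrative sketch (not part of the original module): the ``is_local``
# branch above is what backs the '@' prefix users write in ``DataFrame.query``,
# using only public pandas API:
#   >>> import pandas as pd
#   >>> threshold = 2
#   >>> pd.DataFrame({'a': [1, 2, 3]}).query('a > @threshold')  # doctest: +SKIP
#      a
#   2  3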
def swapkey(self, old_key, new_key, new_value=None):
"""Replace a variable name, with a potentially new value.
Parameters
----------
old_key : str
Current variable name to replace
new_key : str
New variable name to replace `old_key` with
new_value : object
Value to be replaced along with the possible renaming
"""
if self.has_resolvers:
maps = self.resolvers.maps + self.scope.maps
else:
maps = self.scope.maps
maps.append(self.temps)
for mapping in maps:
if old_key in mapping:
mapping[new_key] = new_value
return
def _get_vars(self, stack, scopes):
"""Get specifically scoped variables from a list of stack frames.
Parameters
----------
stack : list
A list of stack frames as returned by ``inspect.stack()``
scopes : sequence of strings
A sequence containing valid stack frame attribute names that
evaluate to a dictionary. For example, ('locals', 'globals')
"""
variables = itertools.product(scopes, stack)
for scope, (frame, _, _, _, _, _) in variables:
try:
d = getattr(frame, 'f_' + scope)
self.scope = self.scope.new_child(d)
finally:
# won't remove it, but DECREF it
# in Py3 this probably isn't necessary since frame won't be in
# scope after the loop
del frame
def update(self, level):
"""Update the current scope by going back `level` levels.
Parameters
----------
level : int or None, optional, default None
"""
sl = level + 1
# add sl frames to the scope starting with the
# most distant and overwriting with more current
# makes sure that we can capture variable scope
stack = inspect.stack()
try:
self._get_vars(stack[:sl], scopes=['locals'])
finally:
del stack[:], stack
def add_tmp(self, value):
"""Add a temporary variable to the scope.
Parameters
----------
value : object
An arbitrary object to be assigned to a temporary variable.
Returns
-------
name : basestring
The name of the temporary variable created.
"""
name = '{name}_{num}_{hex_id}'.format(name=type(value).__name__,
num=self.ntemps,
hex_id=_raw_hex_id(self))
# add to inner most scope
assert name not in self.temps
self.temps[name] = value
assert name in self.temps
# only increment if the variable gets put in the scope
return name
@property
def ntemps(self):
"""The number of temporary variables in this scope"""
return len(self.temps)
@property
def full_scope(self):
"""Return the full scope for use with passing to engines transparently
as a mapping.
Returns
-------
vars : DeepChainMap
All variables in this scope.
"""
maps = [self.temps] + self.resolvers.maps + self.scope.maps
return DeepChainMap(*maps)
| bsd-3-clause |
jlegendary/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of each point
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
nhejazi/scikit-learn | examples/feature_selection/plot_select_from_model_boston.py | 146 | 1527 | """
===================================================
Feature selection using SelectFromModel and LassoCV
===================================================
Use SelectFromModel meta-transformer along with Lasso to select the best
couple of features from the Boston dataset.
"""
# Author: Manoj Kumar <[email protected]>
# License: BSD 3 clause
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_boston
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LassoCV
# Load the boston dataset.
boston = load_boston()
X, y = boston['data'], boston['target']
# We use the base estimator LassoCV since the L1 norm promotes sparsity of features.
clf = LassoCV()
# Set a minimum threshold of 0.25
sfm = SelectFromModel(clf, threshold=0.25)
sfm.fit(X, y)
n_features = sfm.transform(X).shape[1]
# Reset the threshold till the number of features equals two.
# Note that the attribute can be set directly instead of repeatedly
# fitting the metatransformer.
while n_features > 2:
sfm.threshold += 0.1
X_transform = sfm.transform(X)
n_features = X_transform.shape[1]
# Plot the selected two features from X.
plt.title(
"Features selected from Boston using SelectFromModel with "
"threshold %0.3f." % sfm.threshold)
feature1 = X_transform[:, 0]
feature2 = X_transform[:, 1]
plt.plot(feature1, feature2, 'r.')
plt.xlabel("Feature number 1")
plt.ylabel("Feature number 2")
plt.ylim([np.min(feature2), np.max(feature2)])
plt.show()
| bsd-3-clause |
DonghoChoi/Exploration_Study | local/parse_KML_files.py | 2 | 5350 | #!/usr/bin/python
# Author: Dongho Choi
'''
This script
(1)
'''
import os.path
import datetime
import math
import time
import pandas as pd
from sshtunnel import SSHTunnelForwarder # for SSH connection
import pymysql.cursors # MySQL handling API
import sys
import csv
#sys.path.append("./configs/")
sys.path.append("/Users/donghochoi/Documents/Work/Exploration_Study/Dissertation/Code/local/configs/")
import server_config # (1) info2_server (2) exploration_db
from xml.dom.minidom import parseString
import dateutil.parser
import pytz
from tzwhere import tzwhere
from timezonefinder import TimezoneFinder
import glob, os
if __name__ == "__main__":
# Server connection
server = SSHTunnelForwarder(
(server_config.info2_server['host'], 22),
ssh_username=server_config.info2_server['user'],
ssh_password=server_config.info2_server['password'],
remote_bind_address=('127.0.0.1', 3306))
server.start()
connection = pymysql.connect(host='127.0.0.1',
port=server.local_bind_port,
user=server_config.exploration_db['user'],
password=server_config.exploration_db['password'],
db=server_config.exploration_db['database'],
charset='utf8')
connection.autocommit(True)
cursor = connection.cursor()
print("MySQL connection established")
# Get the participants list from the table of 'final_participants'
df_participants = pd.read_sql('SELECT * FROM final_participants', con=connection)
print("Participants Table READ")
userID_list = df_participants['userID'].tolist()
# Base director for google kml files
input_directory_base = "/Users/donghochoi/Documents/Work/Exploration_Study/Dissertation/UserStudy_Nov/google_kml/"
#for i in range(0, 1):
for i in range(0, len(userID_list)): # i - current userID
userID = userID_list[i]
#userID=0
input_directory_user = input_directory_base + str(userID) +"/"
os.chdir(input_directory_user)
print("################## userID: {0} ###################".format(userID))
# KML files in the user directory
for file in glob.glob("*.kml"):
print(file)
input_file = open(str(input_directory_user + file))
data = input_file.read()
input_file.close()
# Parse the string into a DOM
dom = parseString(data)
for d in dom.getElementsByTagName('Placemark'):
# Name of the Place
name_element = d.getElementsByTagName('name')
if (name_element[0].hasChildNodes()):
name = name_element[0].firstChild.data
else:
name = "NO_NAME"
# Coordinates
gx_track = d.getElementsByTagName('gx:Track')[0]
gx_coord_element = gx_track.getElementsByTagName('gx:coord')
if gx_coord_element.length > 0:
gx_coord = gx_track.getElementsByTagName('gx:coord')[0]
coords = gx_coord.firstChild.data.split(' ')
longitude = float(coords[0])
latitude = float(coords[1])
else:
break
#print("longitude:{0}, latitude:{1}".format(longitude, latitude))
# Get timezone of the coordinates
# t = datetime.tzinfo("America/New_York")
tf = TimezoneFinder()
tz = tzwhere.tzwhere(shapely=True)
timezone_str = tf.timezone_at(lng=longitude, lat=latitude)
# timezone_str = tzwhere.tzNameAt(37.3880961, -5.9823299)
#print("timezone of current location: {0}".format(timezone_str))
t = pytz.timezone(timezone_str)
# begin and end time of the visiting
timespan = d.getElementsByTagName('TimeSpan')[0]
begin = timespan.getElementsByTagName('begin')[0].firstChild.data
begin_datetime = dateutil.parser.parse(begin).astimezone(t)
end = timespan.getElementsByTagName('end')[0].firstChild.data
end_datetime = dateutil.parser.parse(end).astimezone(t)
begin_date = begin_datetime.date()
begin_time = begin_datetime.time()
end_date = end_datetime.date()
end_time = end_datetime.time()
#print("Begin date: {0}, Begin time: {1}, End date: {2}, End time: {3}".format(begin_date, begin_time,end_date, end_time))
# insert sql row using this data
sql = "INSERT INTO user_kml_data (userID,place_name,begin_datetime,end_datetime,begin_date,begin_time,end_date,end_time,longitude,latitude,timezone) VALUES ({0},'{1}','{2}','{3}','{4}','{5}','{6}','{7}',{8},{9},'{10}')". \
format(str(userID),str(connection.escape_string(name)),str(begin_datetime.strftime('%Y-%m-%d %H:%M:%S')),str(end_datetime.strftime('%Y-%m-%d %H:%M:%S')),str(begin_date),str(begin_time),\
str(end_date),str(end_time),str(longitude),str(latitude),str(timezone_str))
print(sql)
cursor.execute(sql)
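# A minimal alternative sketch (not in the original script): pymysql also
# accepts a parameterized query, which avoids escaping ``name`` by hand;
# assuming the same table layout as above:
#   cursor.execute(
#       "INSERT INTO user_kml_data (userID,place_name,begin_datetime,end_datetime,"
#       "begin_date,begin_time,end_date,end_time,longitude,latitude,timezone) "
#       "VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
#       (userID, name, begin_datetime.strftime('%Y-%m-%d %H:%M:%S'),
#        end_datetime.strftime('%Y-%m-%d %H:%M:%S'), begin_date, begin_time,
#        end_date, end_time, longitude, latitude, timezone_str))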
server.stop()
| gpl-3.0 |
toobaz/pandas | pandas/tests/indexes/multi/test_copy.py | 2 | 2492 | from copy import copy, deepcopy
import pytest
from pandas import MultiIndex
import pandas.util.testing as tm
def assert_multiindex_copied(copy, original):
# Levels should be (at least) shallow copied
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.codes, original.codes)
# Codes don't matter which way they are copied
tm.assert_almost_equal(copy.codes, original.codes)
assert copy.codes is not original.codes
# Names don't matter which way they are copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(idx):
i_copy = idx.copy()
assert_multiindex_copied(i_copy, idx)
def test_shallow_copy(idx):
i_copy = idx._shallow_copy()
assert_multiindex_copied(i_copy, idx)
def test_labels_deprecated(idx):
# GH23752
with tm.assert_produces_warning(FutureWarning):
idx.copy(labels=idx.codes)
def test_view(idx):
i_view = idx.view()
assert_multiindex_copied(i_view, idx)
@pytest.mark.parametrize("func", [copy, deepcopy])
def test_copy_and_deepcopy(func):
idx = MultiIndex(
levels=[["foo", "bar"], ["fizz", "buzz"]],
codes=[[0, 0, 0, 1], [0, 0, 1, 1]],
names=["first", "second"],
)
idx_copy = func(idx)
assert idx_copy is not idx
assert idx_copy.equals(idx)
@pytest.mark.parametrize("deep", [True, False])
def test_copy_method(deep):
idx = MultiIndex(
levels=[["foo", "bar"], ["fizz", "buzz"]],
codes=[[0, 0, 0, 1], [0, 0, 1, 1]],
names=["first", "second"],
)
idx_copy = idx.copy(deep=deep)
assert idx_copy.equals(idx)
@pytest.mark.parametrize("deep", [True, False])
@pytest.mark.parametrize(
"kwarg, value",
[
("names", ["thrid", "fourth"]),
("levels", [["foo2", "bar2"], ["fizz2", "buzz2"]]),
("codes", [[1, 0, 0, 0], [1, 1, 0, 0]]),
],
)
def test_copy_method_kwargs(deep, kwarg, value):
# gh-12309: Check that the "name" argument as well as other kwargs are honored
idx = MultiIndex(
levels=[["foo", "bar"], ["fizz", "buzz"]],
codes=[[0, 0, 0, 1], [0, 0, 1, 1]],
names=["first", "second"],
)
idx_copy = idx.copy(**{kwarg: value, "deep": deep})
if kwarg == "names":
assert getattr(idx_copy, kwarg) == value
else:
assert [list(i) for i in getattr(idx_copy, kwarg)] == value
| bsd-3-clause |
adybbroe/atrain_match | atrain_match/reshaped_files_scr/pps_vrreport_plot_lwp_amsr_cloudsat.py | 1 | 9898 | import numpy as np
from glob import glob
import os
from matchobject_io import (readAmsrImagerMatchObj,
AmsrImagerTrackObject,
readCloudsatImagerMatchObj,
CloudsatImagerTrackObject)
import matplotlib.pyplot as plt
from utils.validate_lwp_util import get_lwp_diff_inner
from libs.truth_imager_statistics import get_lwp_diff_inner_cloudsat
from histogram_plotting import atrain_scatter
from utils.stat_util import (my_hist, my_iqr, my_rms, my_pex, my_mae)
from my_dir import ADIR
def my_label(data):
label = (#"{:s}\n"
"bias = {:3.1f}\n"
"RMS = {:3.1f}\n"
"median = {:3.1f}\n"
"MAE = {:3.1f}\n"
"IQR = {:3.1f}\n"
"PE>10 = {:3.1f}\n"
"N = {:d}\n".format(
#text
np.mean(data),
my_rms(data),
np.median(data),
my_mae(data),
my_iqr(data),
my_pex(data,10),
len(data)
))
return label
def do_the_printing(aObj, name):
conditions = {}
if "amsr" in aObj.truth_sat and len(aObj.imager.cpp_lwp.shape)==2:
xymax=170
diff, x, y, use = get_lwp_diff_inner(aObj, True)
lon = aObj.amsr.all_arrays["longitude"]
lat = aObj.amsr.all_arrays["latitude"]
use_map = use
conditions["use"] = use.copy()
use_surfs = [use]
surf_names = ["sea"]
scatter_bin_size=5.0
elif "amsr" in aObj.truth_sat:
y = aObj.amsr.lwp
x = aObj.imager.cpp_lwp
lon = aObj.imager.all_arrays["longitude"]
lat = aObj.imager.all_arrays["latitude"]
xymax=170
diff = x-y
use = np.ones(diff.shape, dtype=bool)
use_map = use
conditions["use"] = use.copy()
use_surfs = [use]
surf_names = ["sea"]
scatter_bin_size=5.0
elif "cloudsat" in aObj.truth_sat:
xymax=300
diff, lwp_diff_lo, x, y, use = get_lwp_diff_inner_cloudsat(aObj, True, wide_selection=True)
lon = aObj.cloudsat.all_arrays["longitude"]
lat = aObj.cloudsat.all_arrays["latitude"]
conditions["use"] = use.copy()
conditions["no_cloudsat_iwp"] = aObj.cloudsat.RVOD_ice_water_path<=0
conditions["no_cloudsat_precip"] = np.bitwise_and(np.right_shift(aObj.cloudsat.RVOD_CWC_status,2),1)==0
conditions["geoprof_cloudy"] = np.bitwise_and(np.right_shift(aObj.cloudsat.RVOD_CWC_status,0),10)==0
conditions["without_zeros"] = np.logical_and(aObj.imager.cpp_lwp>0,
aObj.cloudsat.RVOD_liq_water_path>0)
use_sea = np.logical_and(use,aObj.imager.fractionofland <=0)
use_land = np.logical_and(use,aObj.imager.fractionofland >0)
use_surfs = [use, use_sea, use_land]
surf_names = ["all", "sea", "land"]
scatter_bin_size=5.0
for use_t, surf in zip(use_surfs,surf_names):
use_i = use_t
for condition in ["use", "no_cloudsat_iwp", "no_cloudsat_precip", "geoprof_cloudy", "without_zeros"]:
if condition not in conditions.keys():
continue
use_i = np.logical_and(use_i, conditions[condition])
if surf == "all" and condition == "geoprof_cloudy":
use_map = use_i
#print x, y
diff_i = x[use_i]- y[use_i]
print name, surf, condition, np.mean(diff_i), my_rms(diff_i), my_iqr(diff_i), np.median(diff_i), np.std(diff_i), len(diff_i)
vmax = len(diff_i)*0.002
fig = plt.figure()
ax = fig.add_subplot(111)
atrain_scatter(fig, ax, x[use_i] , y[use_i], scatter_bin_size, xymin = 0, xymax=1000, vmax=vmax,
do_colorbar=True, to_km=1.0, ptype='scatter')
ax.plot([0,170],[0,170], ':w')
ax.set_xlabel("PPS LWP g/m^2")
if "amsr" in aObj.truth_sat:
ax.set_ylabel("AMSR-E LWP g/m^2")
else:
ax.set_ylabel("CPR (CloudSat) LWP g/m^2")
plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/VAL_2018_PLOTS/scatter_lwp_%s_%s_dist_all_cpp_%s_%s.png"%(aObj.truth_sat, name, surf, condition), bbox_inches='tight')
fig = plt.figure(figsize=(14, 4.5))
ax = fig.add_subplot(131)
hist_heights, x_, hist_heights_gaussian = my_hist(x[use_i]-y[use_i], None, bmin=-3000, bmax=3000, delta_h=5, return_also_corresponding_gaussian=True)
ax.fill(x_, hist_heights_gaussian, color='silver')
plt.plot(x_, hist_heights, "r-", label = my_label(x[use_i]-y[use_i]))
#ax.set_ylim(0,10)
#ax.set_xlim(-4,6)
ax.set_xlim([-500,600])
ax.set_ylim([00,14])
if "amsr" in aObj.truth_sat:
ax.set_xlabel("error: PPS - AMSR-E LWP g/m^2")
else:
ax.set_xlabel("error: PPS - CPR (CloudSat) LWP g/m^2")
ax.set_ylabel("percent of data")
#plt.yticks(np.arange(0,9,2.0))
plt.legend()
ax = fig.add_subplot(132)
b = atrain_scatter(fig, ax, x[use_i] , y[use_i], scatter_bin_size, xymin = 0, xymax=xymax, vmax=vmax, #140
do_colorbar=False, to_km=1.0, ptype='scatter')
ax.set_xlabel("PPS LWP g/m^2")
ax.plot([0,170],[0,170], ':w')
cax = fig.add_axes([0.57, 0.65, 0.03, 0.22])
cbar = fig.colorbar(b, cax=cax, )
if "amsr" in aObj.truth_sat:
ax.set_ylabel("AMSR-E LWP g/m^2")
else:
ax.set_ylabel("CPR (CloudSat) LWP g/m^2")
ax = fig.add_subplot(133)
hist_heights_x, x_, hist_heights_gaussian = my_hist(x[use_i], None, bmin=0, bmax=300, delta_h=5)
hist_heights_y, y_, hist_heights_gaussian = my_hist(y[use_i], None, bmin=0, bmax=300, delta_h=5)
plt.plot(x_, hist_heights_x, "r-", label = "PPS LWP")
my_label_s = "CPR (CloudSat) LWP"
if "amsr" in aObj.truth_sat:
my_label_s=("AMSR-E LWP")
plt.plot(y_, hist_heights_y, "b-", label = my_label_s)
plt.legend()
ax.set_ylim([0,14])
ax.set_xlabel("LWP g/m^2")
ax.set_ylabel("percent of data")
#plt.show()
plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/VAL_2018_PLOTS/scatter_and_error_lwp_%s_%s_dist_all_cpp_170_%s_%s.png"%(aObj.truth_sat, name, surf, condition), bbox_inches='tight')
#plt.show()
plt.close('all')
from trajectory_plotting import plotSatelliteTrajectory
import config
#plotSatelliteTrajectory(lon, lat,
# ADIR + "/PICTURES_FROM_PYTHON/VAL_2018_PLOTS/map_marble_lwp_%s_dist_all"%(aObj.truth_sat),
# config.AREA_CONFIG_FILE,
# fig_type=['png'])
plotSatelliteTrajectory(lon[use_map], lat[use_map],
ADIR + "/PICTURES_FROM_PYTHON/VAL_2018_PLOTS/map_marble_lwp_%s_%s_dist"%(aObj.truth_sat, name),
config.AREA_CONFIG_FILE,
fig_type=['png'])
fig = plt.figure()
ax = fig.add_subplot(111)
from histogram_plotting import distribution_map
distribution_map(lon[use_map],
lat[use_map])
plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/VAL_2018_PLOTS/map_white_lwp_%s_%s_dist_all_cpp.png"%(aObj.truth_sat, name), bbox_inches='tight')
if __name__ == "__main__":
ROOT_DIR_v2014_amsr = (ADIR + "/DATA_MISC/reshaped_files_validation_2018/global_gac_v2014_created20180927/Reshaped_Files_merged_amsr_lwp/noaa18/5km/")
ROOT_DIR_v2018_amsr = (ADIR + "/DATA_MISC/reshaped_files_validation_2018/global_gac_v2018_created20180927/Reshaped_Files_merged_amsr_lwp/noaa18/5km/")
#ROOT_DIR_v2014_cl = (ADIR + "/DATA_MISC/reshaped_files_validation_2018/global_gac_v2014_created20180927/Reshaped_Files_merged/noaa18/5km/*/*cloudsat")
#ROOT_DIR_v2018_cl = (ADIR + "/DATA_MISC/reshaped_files_validation_2018/global_gac_v2018_created20180927/Reshaped_Files_merged/noaa18/5km/*/*cloudsat")
ROOT_DIR_v2018_modis_cl = (ADIR + "/DATA_MISC/reshaped_files_validation_2018/global_modis_v2018_created20181001_cmap_osiice_dust/Reshaped_Files_merged_cloudsat_lwp/eos2/1km/*/*/")
ROOT_DIR_v2018_modis_amsr = (ADIR + "/DATA_MISC/reshaped_files_validation_2018/global_modis_v2018_created20181001_cmap_osiice_dust/Reshaped_Files_merged_amsr_lwp/eos2/1km/*/*/")
ROOT_DIR_v2014_modis_amsr = (ADIR + "/DATA_MISC/reshaped_files_validation_2018/global_modis_v2014_created20180920/Reshaped_Files_merged_amsr_lwp/eos2/1km/*/*/")
files = glob(ROOT_DIR_v2018_modis_amsr + "*h5")
print files
name = files[0].split('global_')[1][0:11]
if "amsr" in os.path.basename(files[0]):
aObj = AmsrImagerTrackObject()
for filename in files:
print os.path.basename(filename)
aObj += readAmsrImagerMatchObj(filename)
else:
aObj = CloudsatImagerTrackObject()
for filename in files:
print os.path.basename(filename)
if os.path.basename(filename) in [
"5km_noaa18_20071202_1600_99999_cloudsat_imager_match.h5"
]:
continue
aObj_new = readCloudsatImagerMatchObj(filename)
if aObj_new.cloudsat.RVOD_liq_water_path is None:
print os.path.basename(filename)
if len(aObj_new.cloudsat.RVOD_liq_water_path) == len(aObj_new.imager.cpp_lwp):
print "ok", os.path.basename(filename)
aObj += aObj_new
do_the_printing(aObj, name)
| gpl-3.0 |
ghareth/bibulous | doc/sphinxext/inheritance_diagram.py | 98 | 13648 | """
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
.. inheritance-diagram: D E
Produces a graph like the following:
A
/ \
B C
/ \ /
E D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
"""
import inspect
import os
import re
import subprocess
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils.nodes import Body, Element
from docutils.parsers.rst import directives
from sphinx.roles import xfileref_role
def my_import(name):
"""Module importer - taken from the python documentation.
This function allows importing names with dots in them."""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
class DotException(Exception):
pass
class InheritanceGraph(object):
"""
Given a list of classes, determines the set of classes that
they inherit from all the way to the root "object", and then
is able to generate a graphviz dot graph from them.
"""
def __init__(self, class_names, show_builtins=False):
"""
*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
self.classes = self._import_classes(class_names)
self.all_classes = self._all_classes(self.classes)
if len(self.all_classes) == 0:
raise ValueError("No classes found for inheritance diagram")
self.show_builtins = show_builtins
py_sig_re = re.compile(r'''^([\w.]*\.)? # class names
(\w+) \s* $ # optionally arguments
''', re.VERBOSE)
def _import_class_or_module(self, name):
"""
Import a class using its fully-qualified *name*.
"""
try:
path, base = self.py_sig_re.match(name).groups()
except:
raise ValueError(
"Invalid class or module '%s' specified for inheritance diagram" % name)
fullname = (path or '') + base
path = (path and path.rstrip('.'))
if not path:
path = base
try:
module = __import__(path, None, None, [])
# We must do an import of the fully qualified name. Otherwise if a
# subpackage 'a.b' is requested where 'import a' does NOT provide
# 'a.b' automatically, then 'a.b' will not be found below. This
# second call will force the equivalent of 'import a.b' to happen
# after the top-level import above.
my_import(fullname)
except ImportError:
raise ValueError(
"Could not import class or module '%s' specified for inheritance diagram" % name)
try:
todoc = module
for comp in fullname.split('.')[1:]:
todoc = getattr(todoc, comp)
except AttributeError:
raise ValueError(
"Could not find class or module '%s' specified for inheritance diagram" % name)
# If a class, just return it
if inspect.isclass(todoc):
return [todoc]
elif inspect.ismodule(todoc):
classes = []
for cls in todoc.__dict__.values():
if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
classes.append(cls)
return classes
raise ValueError(
"'%s' does not resolve to a class or module" % name)
def _import_classes(self, class_names):
"""
Import a list of classes.
"""
classes = []
for name in class_names:
classes.extend(self._import_class_or_module(name))
return classes
def _all_classes(self, classes):
"""
Return a list of all classes that are ancestors of *classes*.
"""
all_classes = {}
def recurse(cls):
all_classes[cls] = None
for c in cls.__bases__:
if c not in all_classes:
recurse(c)
for cls in classes:
recurse(cls)
return all_classes.keys()
def class_name(self, cls, parts=0):
"""
Given a class object, return a fully-qualified name. This
works for things I've tested in matplotlib so far, but may not
be completely general.
"""
module = cls.__module__
if module == '__builtin__':
fullname = cls.__name__
else:
fullname = "%s.%s" % (module, cls.__name__)
if parts == 0:
return fullname
name_parts = fullname.split('.')
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
"""
Get all of the class names involved in the graph.
"""
return [self.class_name(x) for x in self.all_classes]
# These are the default options for graphviz
default_graph_options = {
"rankdir": "LR",
"size": '"8.0, 12.0"'
}
default_node_options = {
"shape": "box",
"fontsize": 10,
"height": 0.25,
"fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans",
"style": '"setlinewidth(0.5)"'
}
default_edge_options = {
"arrowsize": 0.5,
"style": '"setlinewidth(0.5)"'
}
def _format_node_options(self, options):
return ','.join(["%s=%s" % x for x in options.items()])
def _format_graph_options(self, options):
return ''.join(["%s=%s;\n" % x for x in options.items()])
def generate_dot(self, fd, name, parts=0, urls={},
graph_options={}, node_options={},
edge_options={}):
"""
Generate a graphviz dot graph from the classes that
were passed in to __init__.
*fd* is a Python file-like object to write to.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
*graph_options*, *node_options*, *edge_options* are
dictionaries containing key/value pairs to pass on as graphviz
properties.
"""
g_options = self.default_graph_options.copy()
g_options.update(graph_options)
n_options = self.default_node_options.copy()
n_options.update(node_options)
e_options = self.default_edge_options.copy()
e_options.update(edge_options)
fd.write('digraph %s {\n' % name)
fd.write(self._format_graph_options(g_options))
for cls in self.all_classes:
if not self.show_builtins and cls in __builtins__.values():
continue
name = self.class_name(cls, parts)
# Write the node
this_node_options = n_options.copy()
url = urls.get(self.class_name(cls))
if url is not None:
this_node_options['URL'] = '"%s"' % url
fd.write(' "%s" [%s];\n' %
(name, self._format_node_options(this_node_options)))
# Write the edges
for base in cls.__bases__:
if not self.show_builtins and base in __builtins__.values():
continue
base_name = self.class_name(base, parts)
fd.write(' "%s" -> "%s" [%s];\n' %
(base_name, name,
self._format_node_options(e_options)))
fd.write('}\n')
def run_dot(self, args, name, parts=0, urls={},
graph_options={}, node_options={}, edge_options={}):
"""
Run graphviz 'dot' over this graph, returning whatever 'dot'
writes to stdout.
*args* will be passed along as commandline arguments.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
Raises DotException for any of the many os and
installation-related errors that may occur.
"""
try:
dot = subprocess.Popen(['dot'] + list(args),
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=True)
except OSError:
raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?")
except ValueError:
raise DotException("'dot' called with invalid arguments")
except:
raise DotException("Unexpected error calling 'dot'")
self.generate_dot(dot.stdin, name, parts, urls, graph_options,
node_options, edge_options)
dot.stdin.close()
result = dot.stdout.read()
returncode = dot.wait()
if returncode != 0:
raise DotException("'dot' returned the errorcode %d" % returncode)
return result
class inheritance_diagram(Body, Element):
"""
A docutils node to use as a placeholder for the inheritance
diagram.
"""
pass
def inheritance_diagram_directive(name, arguments, options, content, lineno,
content_offset, block_text, state,
state_machine):
"""
Run when the inheritance_diagram directive is first encountered.
"""
node = inheritance_diagram()
class_names = arguments
# Create a graph starting with the list of classes
graph = InheritanceGraph(class_names)
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, x = xfileref_role(
'class', ':class:`%s`' % name, name, 0, state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
# Store the original content for use as a hash
node['parts'] = options.get('parts', 0)
node['content'] = " ".join(class_names)
return [node]
def get_graph_hash(node):
return md5(node['content'] + str(node['parts'])).hexdigest()[-10:]
def html_output_graph(self, node):
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
path = '_images'
dest_path = os.path.join(setup.app.builder.outdir, path)
if not os.path.exists(dest_path):
os.makedirs(dest_path)
png_path = os.path.join(dest_path, name + ".png")
path = setup.app.builder.imgpath
# Create a mapping from fully-qualified class names to URLs.
urls = {}
for child in node:
if child.get('refuri') is not None:
urls[child['reftitle']] = child.get('refuri')
elif child.get('refid') is not None:
urls[child['reftitle']] = '#' + child.get('refid')
# These arguments to dot will save a PNG file to disk and write
# an HTML image map to stdout.
image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],
name, parts, urls)
return ('<img src="%s/%s.png" usemap="#%s" class="inheritance"/>%s' %
(path, name, name, image_map))
def latex_output_graph(self, node):
"""
Output the graph for LaTeX. This will insert a PDF.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images'))
if not os.path.exists(dest_path):
os.makedirs(dest_path)
pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf"))
graph.run_dot(['-Tpdf', '-o%s' % pdf_path],
name, parts, graph_options={'size': '"6.0,6.0"'})
return '\n\\includegraphics{%s}\n\n' % pdf_path
def visit_inheritance_diagram(inner_func):
"""
This is just a wrapper around html/latex_output_graph to make it
easier to handle errors and insert warnings.
"""
def visitor(self, node):
try:
content = inner_func(self, node)
except DotException, e:
# Insert the exception as a warning in the document
warning = self.document.reporter.warning(str(e), line=node.line)
warning.parent = node
node.children = [warning]
else:
source = self.document.attributes['source']
self.body.append(content)
node.children = []
return visitor
def do_nothing(self, node):
pass
def setup(app):
setup.app = app
setup.confdir = app.confdir
app.add_node(
inheritance_diagram,
latex=(visit_inheritance_diagram(latex_output_graph), do_nothing),
html=(visit_inheritance_diagram(html_output_graph), do_nothing))
app.add_directive(
'inheritance-diagram', inheritance_diagram_directive,
False, (1, 100, 0), parts = directives.nonnegative_int)
| mit |
cassinius/right-to-forget-data | src/binary_class/different_classifiers.py | 1 | 4876 | import pandas as pd
import numpy as np
import sklearn as skl
from sklearn import svm
from sklearn import ensemble
import sklearn.preprocessing as preprocessing
import sklearn.model_selection as ms
import sklearn.metrics as metrics
import sklearn.tree as tree
import math
import os, csv, glob
import sklearn.linear_model as linear_model
from sklearn.multiclass import OneVsRestClassifier
from sklearn.ensemble import BaggingClassifier, RandomForestClassifier
from sklearn.svm import SVC
INPUT_CSV_INCOME = '../../data/anonymization/adults_target_income/original/'
OUTPUT_CSV = '../../output/anonymization/adults_target_income/original/'
def runClassifier(input_file):
original_data = pd.read_csv(
input_file,
names=[
"age", "fnlwgt", "education-num", "capital-gain", "capital-loss", "hours-per-week", "workclass",
"native-country", "sex", "race", "marital-status", "relationship", "occupation", "income"
],
header=0,
# index_col=0,
sep=r'\s*,\s*',
engine='python',
na_values="?")
# Binary features
binary_data = pd.get_dummies(original_data)
# Let's fix the Target as it will be converted to dummy vars too
binary_data["income"] = binary_data["income_>50K"]
del binary_data["income_<=50K"]
del binary_data["income_>50K"]
# Split training and test sets
X_train, X_test, y_train, y_test = ms.train_test_split(binary_data[binary_data.columns.difference(["income"])],
binary_data["income"], train_size=0.80)
scaler = preprocessing.StandardScaler()
X_train = pd.DataFrame(scaler.fit_transform(X_train), columns=X_train.columns)
X_test = scaler.transform(X_test)
# LOGISTIC REGRESSION
# cls = linear_model.LogisticRegression()
# LINEAR SVC
# cls = svm.LinearSVC()
# SVC
# Gives too poor results on this dataset
# cls = svm.SVC(kernel="rbf", verbose=2)
# ENSEMBLE SVM
# n_estimators = 10
# cls = OneVsRestClassifier(
# BaggingClassifier(SVC(kernel='linear', probability=True, class_weight='balanced'), max_samples=1.0 / n_estimators,
# n_estimators=n_estimators))
# GRADIENT BOOSTING
# cls = ensemble.GradientBoostingClassifier(learning_rate=0.1, max_depth=5, verbose=0)
# RANDOM FOREST
cls = ensemble.RandomForestClassifier(n_estimators=100, criterion="gini", max_features=None, verbose=0, n_jobs=-1)
# Run the actual training and prediction phases
cls.fit(X_train, y_train)
y_pred = cls.predict(X_test)
precision = skl.metrics.precision_score(y_test, y_pred)
recall = skl.metrics.recall_score(y_test, y_pred)
f1 = skl.metrics.f1_score(y_test, y_pred)
accuracy = metrics.accuracy_score(y_test, y_pred, normalize=True, sample_weight=None)
return [precision, recall, f1, accuracy]
# COMPILING RESULTS
def computeOriginalData():
print( runClassifier( INPUT_CSV_INCOME + 'adults_original_dataset.csv' ) )
def computeAllResults():
filelist = [f for f in sorted(os.listdir(INPUT_CSV_INCOME)) if f.endswith(".csv")]
with open(OUTPUT_CSV + "results_random_forest.csv", 'w') as fout:
writer = csv.writer(fout, lineterminator='\n')
writer.writerow(["dataset", "precision", "recall", "F1 score", "accuracy"])
final_results = {}
intermediary_results = {}
for input_file in filelist:
intermediary_results[input_file] = {}
intermediary_results[input_file]["precision"] = []
intermediary_results[input_file]["recall"] = []
intermediary_results[input_file]["f1"] = []
intermediary_results[input_file]["accuracy"] = []
for i in range(0,10):
scores = runClassifier(INPUT_CSV_INCOME + input_file)
intermediary_results[input_file]["precision"].append(scores[0])
intermediary_results[input_file]["recall"].append(scores[1])
intermediary_results[input_file]["f1"].append(scores[2])
intermediary_results[input_file]["accuracy"].append(scores[3])
print( [ input_file, i, scores[0], scores[1], scores[2], scores[3] ] )
for input_file in filelist:
final_results[input_file] = {}
final_results[input_file]["precision"] = np.mean(intermediary_results[input_file]["precision"])
final_results[input_file]["recall"] = np.mean(intermediary_results[input_file]["recall"])
final_results[input_file]["f1"] = np.mean(intermediary_results[input_file]["f1"])
final_results[input_file]["accuracy"] = np.mean(intermediary_results[input_file]["accuracy"])
writer.writerow( [input_file,
final_results[input_file]["precision"],
final_results[input_file]["recall"],
final_results[input_file]["f1"],
final_results[input_file]["accuracy"]])
print( final_results )
if __name__ == "__main__":
# computeOriginalData()
computeAllResults()
| apache-2.0 |
grlee77/scipy | scipy/special/_precompute/wright_bessel.py | 12 | 12928 | """Precompute coefficients of several series expansions
of Wright's generalized Bessel function Phi(a, b, x).
See https://dlmf.nist.gov/10.46.E1 with rho=a, beta=b, z=x.
"""
from argparse import ArgumentParser, RawTextHelpFormatter
import numpy as np
from scipy.integrate import quad
from scipy.optimize import minimize_scalar, curve_fit
from time import time
try:
import sympy # type: ignore[import]
from sympy import EulerGamma, Rational, S, Sum, \
factorial, gamma, gammasimp, pi, polygamma, symbols, zeta
from sympy.polys.polyfuncs import horner # type: ignore[import]
except ImportError:
pass
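# A minimal reference sketch (not part of the original script): the function
# being expanded is (DLMF 10.46.E1, with rho=a, beta=b, z=x)
#     Phi(a, b, x) = sum_{k=0}^{inf} x**k / (k! * Gamma(a*k + b)),
# and a naive partial sum such as the hypothetical helper below can serve as a
# brute-force cross-check of the expansions for small |x|.
def _phi_bruteforce(a, b, x, nterms=50):
    """Naive partial sum of Wright's series; for sanity checks only."""
    from math import factorial as _factorial
    from scipy.special import gamma as _scipy_gamma
    return sum(x**k / (_factorial(k) * _scipy_gamma(a * k + b))
               for k in range(nterms))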
def series_small_a():
"""Tylor series expansion of Phi(a, b, x) in a=0 up to order 5.
"""
order = 5
a, b, x, k = symbols("a b x k")
A = [] # terms with a
X = [] # terms with x
B = [] # terms with b (polygammas)
# Phi(a, b, x) = exp(x)/gamma(b) * sum(A[i] * X[i] * B[i])
expression = Sum(x**k/factorial(k)/gamma(a*k+b), (k, 0, S.Infinity))
expression = gamma(b)/sympy.exp(x) * expression
# nth term of taylor series in a=0: a^n/n! * (d^n Phi(a, b, x)/da^n at a=0)
for n in range(0, order+1):
term = expression.diff(a, n).subs(a, 0).simplify().doit()
# set the whole bracket involving polygammas to 1
x_part = (term.subs(polygamma(0, b), 1)
.replace(polygamma, lambda *args: 0))
# sign convention: x part always positive
x_part *= (-1)**n
A.append(a**n/factorial(n))
X.append(horner(x_part))
B.append(horner((term/x_part).simplify()))
s = "Tylor series expansion of Phi(a, b, x) in a=0 up to order 5.\n"
s += "Phi(a, b, x) = exp(x)/gamma(b) * sum(A[i] * X[i] * B[i], i=0..5)\n"
for name, c in zip(['A', 'X', 'B'], [A, X, B]):
for i in range(len(c)):
s += f"\n{name}[{i}] = " + str(c[i])
return s
# expansion of digamma
def dg_series(z, n):
"""Symbolic expansion of digamma(z) in z=0 to order n.
See https://dlmf.nist.gov/5.7.E4 and with https://dlmf.nist.gov/5.5.E2
"""
k = symbols("k")
return -1/z - EulerGamma + \
sympy.summation((-1)**k * zeta(k) * z**(k-1), (k, 2, n+1))
def pg_series(k, z, n):
"""Symbolic expansion of polygamma(k, z) in z=0 to order n."""
return sympy.diff(dg_series(z, n+k), z, k)
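# A minimal sketch (not part of the original script): for |z| < 1 the truncated
# expansion can be cross-checked against sympy's digamma, e.g.
#   float(dg_series(Rational(1, 10), 15) - sympy.digamma(Rational(1, 10)))
# should be close to zero and shrinks further as the order n grows.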
def series_small_a_small_b():
"""Tylor series expansion of Phi(a, b, x) in a=0 and b=0 up to order 5.
Be aware of cancellation of poles in b=0 of digamma(b)/Gamma(b) and
polygamma functions.
digamma(b)/Gamma(b) = -1 - 2*M_EG*b + O(b^2)
digamma(b)^2/Gamma(b) = 1/b + 3*M_EG + b*(-5/12*PI^2+7/2*M_EG^2) + O(b^2)
polygamma(1, b)/Gamma(b) = 1/b + M_EG + b*(1/12*PI^2 + 1/2*M_EG^2) + O(b^2)
and so on.
"""
order = 5
a, b, x, k = symbols("a b x k")
M_PI, M_EG, M_Z3 = symbols("M_PI M_EG M_Z3")
c_subs = {pi: M_PI, EulerGamma: M_EG, zeta(3): M_Z3}
A = [] # terms with a
X = [] # terms with x
B = [] # terms with b (polygammas expanded)
C = [] # terms that generate B
# Phi(a, b, x) = exp(x) * sum(A[i] * X[i] * B[i])
# B[0] = 1
# B[k] = sum(C[k] * b**k/k!, k=0..)
# Note: C[k] can be obtained from a series expansion of 1/gamma(b).
expression = gamma(b)/sympy.exp(x) * \
Sum(x**k/factorial(k)/gamma(a*k+b), (k, 0, S.Infinity))
# nth term of taylor series in a=0: a^n/n! * (d^n Phi(a, b, x)/da^n at a=0)
for n in range(0, order+1):
term = expression.diff(a, n).subs(a, 0).simplify().doit()
# set the whole bracket involving polygammas to 1
x_part = (term.subs(polygamma(0, b), 1)
.replace(polygamma, lambda *args: 0))
# sign convention: x part always positive
x_part *= (-1)**n
# expansion of polygamma part with 1/gamma(b)
pg_part = term/x_part/gamma(b)
if n >= 1:
# Note: highest term is digamma^n
pg_part = pg_part.replace(polygamma,
lambda k, x: pg_series(k, x, order+1+n))
pg_part = (pg_part.series(b, 0, n=order+1-n)
.removeO()
.subs(polygamma(2, 1), -2*zeta(3))
.simplify()
)
A.append(a**n/factorial(n))
X.append(horner(x_part))
B.append(pg_part)
# Calculate C and put in the k!
C = sympy.Poly(B[1].subs(c_subs), b).coeffs()
C.reverse()
for i in range(len(C)):
C[i] = (C[i] * factorial(i)).simplify()
s = "Tylor series expansion of Phi(a, b, x) in a=0 and b=0 up to order 5."
s += "\nPhi(a, b, x) = exp(x) * sum(A[i] * X[i] * B[i], i=0..5)\n"
s += "B[0] = 1\n"
s += "B[i] = sum(C[k+i-1] * b**k/k!, k=0..)\n"
s += "\nM_PI = pi"
s += "\nM_EG = EulerGamma"
s += "\nM_Z3 = zeta(3)"
for name, c in zip(['A', 'X'], [A, X]):
for i in range(len(c)):
s += f"\n{name}[{i}] = "
s += str(c[i])
# For C, do also compute the values numerically
for i in range(len(C)):
s += f"\n# C[{i}] = "
s += str(C[i])
s += f"\nC[{i}] = "
s += str(C[i].subs({M_EG: EulerGamma, M_PI: pi, M_Z3: zeta(3)})
.evalf(17))
# Does B have the assumed structure?
s += "\n\nTest if B[i] does have the assumed structure."
s += "\nC[i] are derived from B[1] allone."
s += "\nTest B[2] == C[1] + b*C[2] + b^2/2*C[3] + b^3/6*C[4] + .."
test = sum([b**k/factorial(k) * C[k+1] for k in range(order-1)])
test = (test - B[2].subs(c_subs)).simplify()
s += f"\ntest successful = {test==S(0)}"
s += "\nTest B[3] == C[2] + b*C[3] + b^2/2*C[4] + .."
test = sum([b**k/factorial(k) * C[k+2] for k in range(order-2)])
test = (test - B[3].subs(c_subs)).simplify()
s += f"\ntest successful = {test==S(0)}"
return s
def asymptotic_series():
"""Asymptotic expansion for large x.
Phi(a, b, x) ~ Z^(1/2-b) * exp((1+a)/a * Z) * sum_k (-1)^k * C_k / Z^k
Z = (a*x)^(1/(1+a))
Wright (1935) lists the coefficients C_0 and C_1 (he calls them a_0 and
a_1). With slightly different notation, Paris (2017) lists coefficients
c_k up to order k=3.
Paris (2017) uses ZP = (1+a)/a * Z (ZP = Z of Paris) and
C_k = C_0 * (-a/(1+a))^k * c_k
"""
order = 8
class g(sympy.Function):
"""Helper function g according to Wright (1935)
g(n, rho, v) = (1 + (rho+2)/3 * v + (rho+2)*(rho+3)/(2*3) * v^2 + ...)
Note: Wright (1935) uses square root of above definition.
"""
nargs = 3
@classmethod
def eval(cls, n, rho, v):
if not n >= 0:
raise ValueError("must have n >= 0")
elif n == 0:
return 1
else:
return g(n-1, rho, v) \
+ gammasimp(gamma(rho+2+n)/gamma(rho+2)) \
/ gammasimp(gamma(3+n)/gamma(3))*v**n
class coef_C(sympy.Function):
"""Calculate coefficients C_m for integer m.
C_m is the coefficient of v^(2*m) in the Taylor expansion in v=0 of
Gamma(m+1/2)/(2*pi) * (2/(rho+1))^(m+1/2) * (1-v)^(-b)
* g(rho, v)^(-m-1/2)
"""
nargs = 3
@classmethod
def eval(cls, m, rho, beta):
if not m >= 0:
raise ValueError("must have m >= 0")
v = symbols("v")
expression = (1-v)**(-beta) * g(2*m, rho, v)**(-m-Rational(1, 2))
res = expression.diff(v, 2*m).subs(v, 0) / factorial(2*m)
res = res * (gamma(m + Rational(1, 2)) / (2*pi)
* (2/(rho+1))**(m + Rational(1, 2)))
return res
# in order to have nice ordering/sorting of expressions, we set a = xa.
xa, b, xap1 = symbols("xa b xap1")
C0 = coef_C(0, xa, b)
# a1 = a(1, rho, beta)
s = "Asymptotic expansion for large x\n"
s += "Phi(a, b, x) = Z**(1/2-b) * exp((1+a)/a * Z) \n"
s += " * sum((-1)**k * C[k]/Z**k, k=0..6)\n\n"
s += "Z = pow(a * x, 1/(1+a))\n"
s += "A[k] = pow(a, k)\n"
s += "B[k] = pow(b, k)\n"
s += "Ap1[k] = pow(1+a, k)\n\n"
s += "C[0] = 1./sqrt(2. * M_PI * Ap1[1])\n"
for i in range(1, order+1):
expr = (coef_C(i, xa, b) / (C0/(1+xa)**i)).simplify()
factor = [x.denominator() for x in sympy.Poly(expr).coeffs()]
factor = sympy.lcm(factor)
expr = (expr * factor).simplify().collect(b, sympy.factor)
expr = expr.xreplace({xa+1: xap1})
s += f"C[{i}] = C[0] / ({factor} * Ap1[{i}])\n"
s += f"C[{i}] *= {str(expr)}\n\n"
import re
re_a = re.compile(r'xa\*\*(\d+)')
s = re_a.sub(r'A[\1]', s)
re_b = re.compile(r'b\*\*(\d+)')
s = re_b.sub(r'B[\1]', s)
s = s.replace('xap1', 'Ap1[1]')
s = s.replace('xa', 'a')
# max integer = 2^31-1 = 2,147,483,647. Solution: Put a point after 10
# or more digits.
re_digits = re.compile(r'(\d{10,})')
s = re_digits.sub(r'\1.', s)
return s
def optimal_epsilon_integral():
"""Fit optimal choice of epsilon for integral representation.
The integrand of
int_0^pi P(eps, a, b, x, phi) * dphi
can exhibit oscillatory behaviour. It stems from the cosine of P and can be
minimized by minimizing the arc length of the argument
f(phi) = eps * sin(phi) - x * eps^(-a) * sin(a * phi) + (1 - b) * phi
of cos(f(phi)).
We minimize the arc length in eps for a grid of values (a, b, x) and fit a
parametric function to it.
"""
def fp(eps, a, b, x, phi):
"""Derivative of f w.r.t. phi."""
eps_a = np.power(1. * eps, -a)
return eps * np.cos(phi) - a * x * eps_a * np.cos(a * phi) + 1 - b
def arclength(eps, a, b, x, epsrel=1e-2, limit=100):
"""Compute Arc length of f.
Note that the arg length of a function f fro t0 to t1 is given by
int_t0^t1 sqrt(1 + f'(t)^2) dt
"""
return quad(lambda phi: np.sqrt(1 + fp(eps, a, b, x, phi)**2),
0, np.pi,
epsrel=epsrel, limit=limit)[0]
# grid of minimal arc length values
data_a = [1e-3, 0.1, 0.5, 0.9, 1, 2, 4, 5, 6, 8]
data_b = [0, 1, 4, 7, 10]
data_x = [1, 1.5, 2, 4, 10, 20, 50, 100, 200, 500, 1e3, 5e3, 1e4]
data_a, data_b, data_x = np.meshgrid(data_a, data_b, data_x)
data_a, data_b, data_x = (data_a.flatten(), data_b.flatten(),
data_x.flatten())
best_eps = []
for i in range(data_x.size):
best_eps.append(
minimize_scalar(lambda eps: arclength(eps, data_a[i], data_b[i],
data_x[i]),
bounds=(1e-3, 1000),
method='Bounded', options={'xatol': 1e-3}).x
)
best_eps = np.array(best_eps)
# pandas would be nice, but here a dictionary is enough
df = {'a': data_a,
'b': data_b,
'x': data_x,
'eps': best_eps,
}
def func(data, A0, A1, A2, A3, A4, A5):
"""Compute parametric function to fit."""
a = data['a']
b = data['b']
x = data['x']
return (A0 * b * np.exp(-0.5 * a)
+ np.exp(A1 + 1 / (1 + a) * np.log(x) - A2 * np.exp(-A3 * a)
+ A4 / (1 + np.exp(A5 * a))))
func_params = list(curve_fit(func, df, df['eps'], method='trf')[0])
s = "Fit optimal eps for integrand P via minimal arc length\n"
s += "with parametric function:\n"
s += "optimal_eps = (A0 * b * exp(-a/2) + exp(A1 + 1 / (1 + a) * log(x)\n"
s += " - A2 * exp(-A3 * a) + A4 / (1 + exp(A5 * a)))\n\n"
s += "Fitted parameters A0 to A5 are:\n"
s += ', '.join(['{:.5g}'.format(x) for x in func_params])
return s
def main():
t0 = time()
parser = ArgumentParser(description=__doc__,
formatter_class=RawTextHelpFormatter)
parser.add_argument('action', type=int, choices=[1, 2, 3, 4],
help='chose what expansion to precompute\n'
'1 : Series for small a\n'
'2 : Series for small a and small b\n'
'3 : Asymptotic series for large x\n'
' This may take some time (>4h).\n'
'4 : Fit optimal eps for integral representation.'
)
args = parser.parse_args()
switch = {1: lambda: print(series_small_a()),
2: lambda: print(series_small_a_small_b()),
3: lambda: print(asymptotic_series()),
4: lambda: print(optimal_epsilon_integral())
}
switch.get(args.action, lambda: print("Invalid input."))()
print("\n{:.1f} minutes elapsed.\n".format((time() - t0)/60))
if __name__ == '__main__':
main()
| bsd-3-clause |
saskartt/P4UL | pyAnalyze/approachAnalysis.py | 1 | 8459 | #!/usr/bin/env python
import sys
import subprocess as sb
import numpy as np
import argparse
from utilities import filesFromList, writeLog
from plotTools import userLabels, extractFromCSV, addToPlot
import matplotlib.pyplot as plt
'''
Description:
Author: Mikko Auvinen
[email protected]
University of Helsinki &
Finnish Meteorological Institute
'''
#======== Function definitions =============================#
def p2pMaxMin( r ):
# Peak to peak max and min evaluation routine.
dr = (r[1:] - r[:-1])
fpos = (dr>=0.).astype(int)
fneg = (dr<0.).astype(int)
rp_cum = 0.; rn_cum = 0.
rp_max = 0.; rn_min = 0.
i = 0
for fp in fpos:
if( fp == 0 ):
if( rp_cum > rp_max ): rp_max = rp_cum
rp_cum = 0.
rp_cum += float(fp)*dr[i]; i+=1
#print('rp_cum[{}] = {} '.format(i,rp_cum))
i = 0
for fn in fneg:
if( fn == 0 ):
if( rn_cum < rn_min ): rn_min = rn_cum
rn_cum = 0.
rn_cum += float(fn)*dr[i]; i+=1
#print('rn_cum[{}] = {} '.format(i,rn_cum))
return rp_max, rn_min
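# Illustrative usage (not part of the original script): for
# r = np.array([0., 2., 5., 4., 1., 2.]) the largest uninterrupted rise is
# 0 -> 5 and the largest uninterrupted fall is 5 -> 1, so
#   p2pMaxMin(np.array([0., 2., 5., 4., 1., 2.]))  ->  (5.0, -4.0)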
#==========================================================#
parser = argparse.ArgumentParser(prog='approachAnalysis.py')
parser.add_argument("strKey", help="Search string for collecting files.",nargs='?',\
default=".csv")
parser.add_argument("--magy", help="Magnitude of all variables.", action="store_true",\
default=False)
parser.add_argument("--yx", help="Reverse axes: plot(x,y) --> plot(y,x)", action="store_true",\
default=False)
parser.add_argument("--labels", help="User specified labels.", action="store_true",\
default=False)
parser.add_argument("--reuse", help="Reuse once specified variable selections.", action="store_true",\
default=False)
parser.add_argument("-v", "--var", help="Variable Name in CSV-file", type=str, nargs='+',\
default=['u','v','w'] )
parser.add_argument("-yl","--ylims", help="Y-axis limits: [min,max]. Default=[0,10]",\
type=float,nargs=2,default=[0.,10.])
parser.add_argument("-fn","--figName", help="Name of the (temporary) figures. (default=tmp)",\
type=str,default="tmp")
parser.add_argument("-fa","--fileAnim", help="Name of the animation file. (default=anim.gif)",\
type=str,default="anim.gif")
parser.add_argument("-na", "--noAnim", help="Do not make an animation.",\
action="store_true", default=False)
args = parser.parse_args()
writeLog( parser, args )
#==========================================================#
strKey = args.strKey
figName = args.figName
fileAnim = args.fileAnim
noAnimation = args.noAnim
ylims = args.ylims
varList = args.var
fileNos, fileList = filesFromList( "*"+strKey+"*" )
print(' The varList [-v, --var] option is overridden at this point. ')
print(' Reading coordinate values from file {} ...'.format( fileList[0]) )
coordList = [ 'arc_length', 'Points:0', 'Points:1', 'Points:2']
xv = extractFromCSV( fileList[0] , coordList )
s = xv[0].copy() # arc_length
x = xv[1].copy(); y = xv[2].copy(); z = xv[3].copy()
xv = None
print(' Done.\n')
# -------------------------------------------------------- #
print(' Computing the mean velocity values ... ')
varList = ['u', 'v', 'w']
Ux_mean = None; Uy_mean = None; Uz_mean = None
n = 0
for fn in fileNos:
n += 1
#pfig = pl.figure(num=1, figsize=(18.,9.))
tv = extractFromCSV( fileList[fn] , varList )
u = tv[0].copy(); v = tv[1].copy(); w = tv[2].copy()
tv = None
# The velocity data may contain nan entries, which should be replaced by 0.
u[np.isnan(u)] = 0.; v[np.isnan(v)] = 0.; w[np.isnan(w)] = 0.
# Accumulate sums for mean values.
if( Ux_mean is None ):
Ux_mean = np.zeros( u.shape ) # Initialize
Uy_mean = np.zeros( u.shape )
Uz_mean = np.zeros( u.shape )
Ux_mean += u; Uy_mean += v; Uz_mean += w
# Use the sums to compute the mean values.
Ux_mean /= float(n); Uy_mean /= float(n); Uz_mean /= float(n)
print(' Done.\n')
# -------------------------------------------------------- #
print(' Extract directional data from the approach line ... ')
#pfig = plotCSV( pfig, fileList[fn], args.yx, args.magy, args.reuse )
rad2deg = 180./np.pi
deg2rad = np.pi/180.
# Starting point: Rissala's approach line (from Paraview)
p1 = np.array([ x[0], y[0], z[0] ]) # np.array([6800., 1250., 0.])
p2 = np.array([ x[-1],y[-1],z[-1] ]) # np.array([7700., 650., 72.])
da = p2 - p1
da_mag = np.sqrt( np.sum( da**2 ) )
da_xy = np.sqrt( np.sum( da[0:2]**2))
# Approach direction (normal vector)
na = da/da_mag
# Sharp angle between the runway and the mean wind
theta = np.arccos( da[0]/da_xy )
print(' Sharp angle between the runway and the mean wind: theta = {} deg'.format( theta*rad2deg ))
print(' Done.\n')
# -------------------------------------------------------- #
# Hornet's approach speed and velocity
Uappr_mag = 69.
Ua = Uappr_mag*na
# Mean headwind
Uhw_mean = Ux_mean * np.cos( theta ) - Uy_mean * np.sin( theta )
# Speed relative to the ground ... perhaps not needed.
U_grd = Uappr_mag - Uhw_mean
# Approach angle
gamma = np.arctan( da[2]/da_xy )
# F18 Data:
rho = 1.2 # standard air
CL = 1.2 # at 7deg angle of attack
CLa = 2.86 # 1/rad (alpha in range [3deg, 10deg])
Aref=18.*3.
K = 0.5*rho*Aref
# Extract deviations in the headwind and compute the changes in AoA [alpha].
Lift = K*Uappr_mag**2*CL
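# Derivation sketch (not from the original script): with
# L = 0.5*rho*Aref*U**2*CL(alpha), a first-order perturbation gives
#     dL/L = (CLa/CL)*dalpha + 2*dU/U,
# which is what the dL_a/Lift and dL_u/Lift terms inside the loop below evaluate.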
n = 0
dL_max = 0.
dL_sum = 0.
dL_mxv = 0. # Maximum variance.
dL_p2p_max = 0.
dL_p2p_min = 0.
for fn in fileNos:
n += 1
#pfig = pl.figure(num=1, figsize=(18.,9.))
tv = extractFromCSV( fileList[fn] , varList ) # NOTE: varList = ['u', 'v', 'w']
du = tv[0]-Ux_mean
dv = tv[1]-Uy_mean
dw = tv[2]-Uz_mean # Uz_mean could be replaced by 0.
tv = None
# The velocity data may contain nan entries, which should be replaced by 0.
du[np.isnan(du)] = 0.; dv[np.isnan(dv)] = 0.; dw[np.isnan(dw)] = 0.
dU_hw = du * np.cos( theta ) - dv * np.sin( theta )
dalpha = np.arctan( dw/Uappr_mag)
# Change in lift due to changes in AoA:
dL_a = K*Uappr_mag**2*CLa*dalpha
# Change in lift due to changes in head wind.
dL_u = 2.*K*CL*Uappr_mag*dU_hw
dLp_a = dL_a/Lift * 100. # In percentage
dLp_u = dL_u/Lift * 100.
dLp_mag= np.sqrt( (dLp_a+dLp_u)**2 )
#fig = plt.figure(num=1, figsize=(18,9))
fig, (ax1, ax2) = plt.subplots(num=1, nrows=2, sharex=True, figsize=(18,11))
lines11,=ax1.plot( s,dLp_a,'-o', linewidth=1.6 )
lines12,=ax1.plot( s,dLp_u,'-o', linewidth=1.6 )
ax1.legend( (lines11,lines12) , ('dL(alpha) [%]',' dL(u) [%]'), loc=1 )
ax1.set_ylim([-8., 8.])
ax1.set_xlim([ min(s) , 1.05*max(s)]) # s: arc_length
ax1.set_title(' Changes in Lift due to turbulence ', fontsize=22)
ax1.set_ylabel(' dL [%] ', fontsize=22); ax1.grid(True)
lines2,=ax2.plot(s,dLp_mag,'-ro', linewidth=1.6 )
ax2.legend( (lines2,) , (' ABS(SUM(dL)) [%]',), loc=1 )
ax2.set_xlim([ min(s) , 1.05*max(s)]) # s: arc_length
ax2.set_ylim([-1., 12.5]); ax2.set_xlim([ min(s) , max(s)])
ax2.set_xlabel(' Distance along approach line [m] ', fontsize=22 )
ax2.set_ylabel(' dL [%] ', fontsize=22 ); ax2.grid(True)
# Maximum variance
dL_ivar = np.var( dLp_mag[ du > 0 ] ) # Consider only nonzero values.
if( dL_ivar > dL_mxv ): dL_mxv = dL_ivar
# Mean variance
dL_sum += dL_ivar
dL_var = dL_sum/float(n)
dL_imax = np.max(dLp_mag)
if( dL_imax > dL_max ): dL_max = dL_imax
dL_ip2p_mx, dL_ip2p_mn = p2pMaxMin( (dLp_a+dLp_u) )
if( dL_ip2p_mx > dL_p2p_max ): dL_p2p_max = dL_ip2p_mx
if( dL_ip2p_mn < dL_p2p_min ): dL_p2p_min = dL_ip2p_mn
infoStr =' Time = {:4d}s\n'.format((n-1)*2)
infoStr +=' Current P2P(dL) [max,min] = [{:4.1f}% , {:4.1f}%]\n'.format(dL_ip2p_mx, dL_ip2p_mn)
infoStr +=' Running P2P(dL) [max,min] = [{:4.1f}% , {:4.1f}%]\n'.format(dL_p2p_max, dL_p2p_min)
#infoStr +=' Max(dL) = {:4.1f}%\n'.format(dL_imax)
infoStr +=' Running Max(dL) = {:4.1f}%\n'.format(dL_max)
#infoStr +=' Var(dL) = {:4.1f}%\n'.format(dL_ivar)
infoStr +=' Running Mean(Var(dL)) = {:4.1f}%\n'.format(dL_var)
infoStr +=' Running Max(Var(dL)) = {:4.1f}%\n'.format(dL_mxv)
plt.text( 1. , 5.5, infoStr , fontsize=20)
figStr = '{}_{:04d}.jpg'.format(figName,n)
print(' Saving figure {} '.format(figStr))
fig.savefig(figStr)
ax1.cla(); ax2.cla(); fig.clf()
if( not noAnimation ):
cmd = 'convert {}_* {} '.format(figName,fileAnim)
print(' Executing command: ${}'.format(cmd))
sb.call(cmd, shell=True)
print(' All Done! ')
| mit |
thilbern/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 17 | 6222 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
"""BernoulliRBM should work on small sparse matrices."""
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
"""
Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
from the same input
"""
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
    # you need that many iterations
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
"""
Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
the same input even when the input is sparse, and test against non-sparse
"""
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
"""Check if we don't get NaNs sampling the full digits dataset."""
rng = np.random.RandomState(42)
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=rng)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
def test_score_samples():
"""Test score_samples (pseudo-likelihood) method."""
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples(np.arange(1000) * 100)
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
"""
Make sure RBM works with sparse input when verbose=True
"""
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
brycefrank/pyfor | pyfortest/test_pyfor.py | 1 | 14739 | from pyfor import *
import unittest
# modeled heavily after laspytest
# https://github.com/laspy/laspy/blob/master/laspytest/test_laspy.py
import pandas as pd
import laspy
import os
import numpy as np
import geopandas as gpd
import plyfile
import matplotlib.pyplot as plt
data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data")
test_las = os.path.join(data_dir, "test.las")
test_ply = os.path.join(data_dir, "test.ply")
test_laz = os.path.join(data_dir, "test.laz")
test_shp = os.path.join(data_dir, "clip.shp")
proj4str = "+proj=utm +zone=10 +ellps=GRS80 +datum=NAD83 +units=m +no_defs"
test_points = {
"x": [0, 1, 2],
"y": [0, 1, 2],
"z": [0, 1, 2],
"intensity": [0, 1, 2],
"classification": [0, 1, 2],
"flag_byte": [0, 1, 2],
"scan_angle_rank": [0, 1, 2],
"user_data": [0, 1, 2],
"pt_src_id": [0, 1, 2],
"return_num": [0, 1, 2],
}
class DataTestCase:
def test_init(self):
self.assertEqual(type(self.test_data), self.test_type)
def test_data_length(self):
self.assertEqual(len(self.test_data.points), len(test_points["z"]))
class PLYDataTestCase(unittest.TestCase, DataTestCase):
def setUp(self):
self.test_points = pd.DataFrame.from_dict(test_points).astype(np.float)
self.test_header = 0
self.test_data = cloud.PLYData(self.test_points, self.test_header)
self.test_type = cloud.PLYData
def test_write(self):
self.test_data.write(os.path.join(data_dir, "temp_test_write.ply"))
plyfile.PlyData.read(os.path.join(data_dir, "temp_test_write.ply"))
os.remove(os.path.join(data_dir, "temp_test_write.ply"))
class LASDataTestCase(unittest.TestCase, DataTestCase):
def setUp(self):
self.test_points = pd.DataFrame.from_dict(test_points)
self.test_header = laspy.file.File(test_las).header
self.column = [0, 1]
self.test_data = cloud.LASData(self.test_points, self.test_header)
self.test_type = cloud.LASData
def tearDown(self):
self.test_data.header.reader.close()
def test_write(self):
self.test_data.write(os.path.join(data_dir, "temp_test_write.las"))
read = laspy.file.File(os.path.join(data_dir, "temp_test_write.las"))
self.assertEqual(type(read), laspy.file.File)
read.close()
os.remove(os.path.join(data_dir, "temp_test_write.las"))
class CloudTestCase:
def test_not_supported(self):
with self.assertRaises(ValueError):
# Attempt to read a non-supported file type
cloud.Cloud(os.path.join(data_dir, "clip.shp"))
def test_cloud_summary(self):
print(self.test_cloud)
def test_grid_creation(self):
"""Tests if the grid is successfully created."""
# Does the call to grid return the proper type
self.assertEqual(type(self.test_cloud.grid(1)), rasterizer.Grid)
class LASCloudTestCase(unittest.TestCase, CloudTestCase):
def setUp(self):
self.test_cloud = cloud.Cloud(test_las)
def tearDown(self):
self.test_cloud.data.header.reader.close()
def test_dimensions_loaded(self):
local_cloud = cloud.Cloud(test_las)
self.assertEqual(type(local_cloud), cloud.Cloud)
if local_cloud.extension == ".las" or local_cloud.extension == ".laz":
self.assertListEqual(
list(local_cloud.data.points.columns.values),
[
"x",
"y",
"z",
"intensity",
"return_num",
"classification",
"flag_byte",
"scan_angle_rank",
"user_data",
"pt_src_id",
],
)
local_cloud.data.header.reader.close()
def test_filter_z(self):
self.test_cloud.filter(40, 41, "z")
self.assertEqual(self.test_cloud.data.count, 3639)
self.assertLessEqual(self.test_cloud.data.max[2], [41])
self.assertGreaterEqual(self.test_cloud.data.min[2], [40])
def test_clip_polygon(self):
poly = gpd.read_file(test_shp)["geometry"][0]
self.test_cloud.clip(poly)
@unittest.skip("Broken for multiple platforms")
def test_plot(self):
self.test_cloud.plot()
@unittest.skip("Broken for multiple platforms")
def test_plot3d(self):
self.test_cloud.plot3d()
self.test_cloud.plot3d(dim="user_data")
def test_normalize(self):
self.test_cloud.normalize(3)
self.assertLess(self.test_cloud.data.max[2], 65)
def test_normalize_classified(self):
self.test_cloud.normalize(3, classified=True)
self.assertLess(self.test_cloud.data.max[2], 65)
def test_subtract(self):
zf = ground_filter.Zhang2003(1)
bem = zf.bem(self.test_cloud)
bem.write("./temp_bem.tif")
self.test_cloud.subtract("./temp_bem.tif")
# In theory this should equal the z column from a regular normalization, do this on a separate cloud
pc = cloud.Cloud(test_las)
pc.normalize(1)
normalize_z = pc.data.points["z"]
subtracted_z = self.test_cloud.data.points["z"]
# Allow a tolerance of 0.005 meters
self.assertLess(
np.mean(normalize_z.values) - np.mean(subtracted_z.values), 0.005
)
os.remove("./temp_bem.tif")
def test_chm(self):
self.test_cloud.chm(0.5, interp_method="nearest", pit_filter="median")
def test_chm_without_interpolation_method(self):
self.assertEqual(
type(self.test_cloud.chm(0.5, interp_method=None)), rasterizer.Raster
)
def test_append(self):
n_points = len(self.test_cloud.data.points)
self.test_cloud.data._append(self.test_cloud.data)
self.assertGreater(len(self.test_cloud.data.points), n_points)
def test_write(self):
self.test_cloud.write(os.path.join(data_dir, "test_write.las"))
os.remove(os.path.join(data_dir, "test_write.las"))
# TODO broken on Travis
# class LAZCloudTestCase(LASCloudTestCase):
# def setUp(self):
# self.test_cloud = cloud.Cloud(test_laz)
class PLYCloudTestCase(unittest.TestCase, CloudTestCase):
def setUp(self):
self.test_cloud = cloud.Cloud(test_ply)
def test_write(self):
self.test_cloud.write(os.path.join(data_dir, "test_write.ply"))
os.remove(os.path.join(data_dir, "test_write.ply"))
class GridTestCase(unittest.TestCase):
def setUp(self):
self.test_grid = cloud.Cloud(test_las).grid(1)
def tearDown(self):
self.test_grid.cloud.data.header.reader.close()
def test_m(self):
self.assertEqual(200, self.test_grid.m)
def test_n(self):
self.assertEqual(200, self.test_grid.n)
def test_cloud(self):
self.assertEqual(type(self.test_grid.cloud), cloud.Cloud)
def test_empty_cells(self):
empty = self.test_grid.empty_cells
# Check that there are the correct number
self.assertEqual(empty.shape, (301, 2))
def test_raster(self):
raster = self.test_grid.raster("max", "z")
self.assertEqual(type(raster), rasterizer.Raster)
def test_interpolate(self):
self.test_grid.interpolate("max", "z")
def test_update(self):
"""
Change a few points from parent cloud, update and check if different
:return:
"""
pre = self.test_grid.m
self.test_grid.cloud.data.points = self.test_grid.cloud.data.points.iloc[1:50]
self.test_grid._update()
post = self.test_grid.m
self.assertEqual(pre, 200)
self.assertEqual(post, 1)
class GridMetricsTestCase(unittest.TestCase):
def setUp(self):
self.test_grid = cloud.Cloud(test_las).grid(20)
def test_pct_above_mean(self):
all_above_mean = metrics.pct_above_heightbreak(
self.test_grid, r=0, heightbreak="mean"
)
self.assertEqual(0, np.sum(all_above_mean.array > 1))
r1_above_mean = metrics.pct_above_heightbreak(
self.test_grid, r=1, heightbreak="mean"
)
self.assertEqual(0, np.sum(r1_above_mean.array > 1))
def test_pct_above_heightbreak(self):
all_above_2 = metrics.pct_above_heightbreak(self.test_grid, r=0, heightbreak=2)
self.assertEqual(0, np.sum(all_above_2.array > 1))
r1_above_2 = metrics.pct_above_heightbreak(self.test_grid, r=1, heightbreak=2)
self.assertEqual(0, np.sum(r1_above_2.array > 1))
def test_return_num(self):
rast = metrics.return_num(self.test_grid, 1)
self.assertEqual(1220, rast.array[0, 0])
rast = metrics.return_num(self.test_grid, 100)
self.assertTrue(np.isnan(rast.array[0, 0]))
def test_all_returns(self):
rast = metrics.all_returns(self.test_grid)
self.assertEqual(1531, rast.array[0, 0])
def test_total_returns(self):
rast = metrics.total_returns(self.test_grid)
self.assertEqual(1531, rast.array[0, 0])
def test_standard_metrics(self):
metrics_dict = metrics.standard_metrics_grid(self.test_grid, 2)
self.assertEqual(len(metrics_dict), 25)
class CloudMetricsTestCase(unittest.TestCase):
def setUp(self):
self.test_cloud = cloud.Cloud(test_las)
def test_standard_metrics(self):
metrics.standard_metrics_cloud(self.test_cloud.data.points, 2)
class RasterTestCase(unittest.TestCase):
def setUp(self):
pc = cloud.Cloud(test_las)
self.test_raster = pc.grid(1).raster("max", "z")
self.test_raster.grid.cloud.crs = proj4str
def tearDown(self):
self.test_raster.grid.cloud.data.header.reader.close()
def test_affine(self):
affine = self.test_raster._affine
self.assertEqual(affine[0], 1.0)
self.assertEqual(affine[1], 0.0)
self.assertEqual(affine[2], 405000.01000000001)
self.assertEqual(affine[3], 0.0)
self.assertEqual(affine[4], -1.0)
self.assertEqual(affine[5], 3276499.9900000002)
self.assertEqual(affine[6], 0)
def test_array_oriented_correctly(self):
"""
Tests if the index [0,0] refers to the top left corner of the image. That is, if I were to plot the raster
using plt.imshow it would appear to the user as a correctly oriented image.
"""
self.assertEqual(self.test_raster.array[0, 0], 45.11)
@unittest.skip("Broken for multiple platforms")
def test_plot(self):
self.test_raster.plot()
self.test_raster.plot(return_plot=True)
def test_force_extent_contract(self):
min_x, min_y = self.test_raster.grid.cloud.data.header.min[0:2]
max_x, max_y = self.test_raster.grid.cloud.data.header.max[0:2]
# Test buffering in 10 meters
buffer = 10
bbox = (min_x + buffer, max_x - buffer, min_y + buffer, max_y - buffer)
self.test_raster.force_extent(bbox)
self.assertEqual(self.test_raster.array.shape, (180, 180))
self.assertEqual(self.test_raster._affine[2], min_x + buffer)
self.assertEqual(self.test_raster._affine[5], max_y - buffer)
def test_force_extent_expand(self):
min_x, min_y = self.test_raster.grid.cloud.data.header.min[0:2]
max_x, max_y = self.test_raster.grid.cloud.data.header.max[0:2]
# Test buffering out 10 meters
buffer = 10
bbox = (min_x - buffer, max_x + buffer, min_y - buffer, max_y + buffer)
self.test_raster.force_extent(bbox)
self.assertEqual(self.test_raster.array.shape, (220, 220))
self.assertEqual(self.test_raster._affine[2], min_x - buffer)
self.assertEqual(self.test_raster._affine[5], max_y + buffer)
def test_write_with_crs(self):
self.test_raster.write("./temp_tif.tif")
def test_write_without_crs(self):
self.test_raster.crs = None
self.test_raster.write("./temp_tif.tif")
class RetileTestCase(unittest.TestCase):
def setUp(self):
cdf = collection.from_dir(os.path.join(data_dir, "mock_collection"))
self.retiler = collection.Retiler(cdf)
def test_retile_raster(self):
self.retiler.retile_raster(10, 100)
self.retiler.retile_raster(10, 100, 10)
def test_retile_buffer(self):
self.retiler.retile_buffer(10)
class GISExportTestCase(unittest.TestCase):
def setUp(self):
self.test_grid = cloud.Cloud(test_las).grid(1)
self.test_raster = self.test_grid.raster("max", "z")
def tearDown(self):
self.test_raster.grid.cloud.data.header.reader.close()
def test_project_indices(self):
test_indices = np.array([[0, 0], [1, 1]])
gisexport.project_indices(test_indices, self.test_raster)
def test_pcs_exists(self):
print(os.path.realpath(__file__))
        pcs_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "..", "pyfor", "pcs.csv"
        )
self.assertTrue(os.path.exists(pcs_path))
def test_array_to_raster_writes(self):
test_grid = cloud.Cloud(test_las).grid(1)
test_grid.cloud.crs = proj4str
test_raster = test_grid.raster("max", "z")
gisexport.array_to_raster(
test_raster.array,
test_raster._affine,
proj4str,
os.path.join(data_dir, "temp_raster_array.tif"),
)
self.assertTrue(os.path.exists(os.path.join(data_dir, "temp_raster_array.tif")))
os.remove(os.path.join(data_dir, "temp_raster_array.tif"))
class VoxelGridTestCase(unittest.TestCase):
def setUp(self):
self.test_voxel_grid = voxelizer.VoxelGrid(cloud.Cloud(test_las), cell_size=2)
def test_voxel_raster(self):
self.test_voxel_grid.voxel_raster("count", "z")
class KrausPfeifer1998(unittest.TestCase):
def setUp(self):
self.test_cloud = cloud.Cloud(test_las)
self.test_kp_filter = ground_filter.KrausPfeifer1998(3)
def test_filter(self):
self.test_kp_filter._filter(self.test_cloud.grid(self.test_kp_filter.cell_size))
def test_classify(self):
self.test_kp_filter.classify(self.test_cloud)
self.assertEqual(
133362, np.sum(self.test_cloud.data.points["classification"] == 2)
)
class Zhang2003TestCase(unittest.TestCase):
def setUp(self):
self.test_cloud = cloud.Cloud(test_las)
self.test_zhang_filter = ground_filter.Zhang2003(3)
def test_filter(self):
self.test_zhang_filter._filter(
self.test_cloud.grid(self.test_zhang_filter.cell_size)
)
def test_bem(self):
self.test_zhang_filter.bem(self.test_cloud)
| mit |
magic2du/contact_matrix | Contact_maps/DeepLearning/DeepLearningTool/DL_contact_matrix_New_Server_0706_2015_2.py | 1 | 47386 |
# coding: utf-8
# In[5]:
import sys, os
sys.path.append('../../../libs/')
sys.path.append('../libs/')
sys.path.append('../../libs/')
import os.path
import IO_class
from IO_class import FileOperator
from sklearn import cross_validation
import sklearn
import csv
from dateutil import parser
from datetime import timedelta
from sklearn import svm
import numpy as np
import pdb
import pickle
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import KFold
from sklearn import preprocessing
import scipy.stats as ss
from sklearn.svm import LinearSVC
import random
# from DL_libs import *
from itertools import izip #new
import math
from sklearn.svm import SVC
import cPickle
import gzip
import numpy
import os
import theano
from theano.tensor.shared_randomstreams import RandomStreams
import time
from dlnn.io_func.model_io import _nnet2file, _cfg2file, _file2nnet, log
from dlnn.utils.learn_rates import _lrate2file, _file2lrate
from dlnn.models.dnn import DNN
from dlnn.models.dropout_nnet import DNN_Dropout
from dlnn.models.sda import SdA, Sda_xy
import theano.tensor as T
from dlnn.utils.network_config import NetworkConfig
from dlnn.utils.sda_config import SdAConfig
from dlnn.utils.utils import parse_arguments, save_two_integers, read_two_integers
from dlnn.learning.sgd import train_sgd, validate_by_minibatch
from dlnn.utils.utils import shared_dataset_X
from numpy import dtype, shape
from dlnn.factories import Sda_factory, Sda_xy_factory, DNN_factory, Parellel_Sda_factory
# In[6]:
from DL_libs import performance_score, Preprocessing_Scaler_with_mean_point5, cal_epochs, pretrain_a_Sda_with_estop, train_a_Sda, trainSda
#filename = 'SUCCESS_log_CrossValidation_load_DL_remoteFisherM1_DL_RE_US_DL_RE_US_1_1_19MAY2014.txt'
#filename = 'listOfDDIsHaveOver2InterfacesHave40-75_Examples_2010_real_selected.txt' #for testing
# set settings for this script
settings = {}
settings['filename'] = 'list_of_DDIs_for_contact_matrix_paper123.txt'
#settings['filename'] = 'testDDI.txt'
settings['fisher_mode'] = 'FisherM1ONLY'# settings['fisher_mode'] = 'FisherM1ONLY'
settings['with_auc_score'] = False
settings['n_cores'] = 8
settings['reduce_ratio'] = 1
settings['SVM'] = 1
settings['DL'] = 1
settings['DL_old'] = 1
settings['Sda_new'] = 1
settings['DL_xy'] = 1
settings['SAE_SVM'] = 1
settings['SAE_SVM_COMBO'] = 1
settings['SVM_RBF'] = 1
settings['SAE_SVM_RBF'] = 0
settings['SAE_SVM_RBF_COMBO'] = 1
settings['SVM_POLY'] = 0
settings['DL_S'] = 1
settings['DL_U'] = 0
settings['DL_S_new'] = 1
settings['Sda_xy_with_first'] = 1
settings['DL_S_new_contraction'] = 1
settings['DL_S_new_sparsity'] = 1
settings['DL_S_new_weight_decay'] = 2
settings['DL_S_new_Drop_out'] = 1
settings['finetune_lr'] = 1
settings['batch_size'] = 100
settings['pretraining_interations'] = 10003
settings['pretrain_lr'] = 0.01
settings['training_interations'] = 20003
#settings['training_epochs'] = 10001
settings['hidden_layers_sizes'] = [100, 100]
settings['corruption_levels'] = [0, 0]
cfg = settings.copy()
cfg['learning_rate'] = 0.01 # this is for pretraining
#cfg['train-data'] = "/home/du/Dropbox/Project/libs/dlnn/cmds/train.pickle.gz,partition=600m"
#cfg['batch_size'] = 100
cfg['wdir'] = '/home/du/Documents/tmp'
cfg['param-output-file']= 'sda.mdl'
cfg['sparsity'] = 0
cfg['sparsity_weight'] = 0
# weight_y: 1 means use equal weight for X and Y; 0 means the weight of Y is scaled to 1/(size of X); None means use the original vector.
cfg['n_outs'] = 1
#cfg['lrate'] ='C:0.08:5 ' #'D:1:0.5:0.05,0.005:5000'
#cfg['lrate'] = 'D:0.1:0.5:0.05,0.005:1500' #'D:0.1:0.5:0.05,0.005:1500'
cfg['lrate_pre'] ='D:1:0.5:0.05,0.005:'
'''
constant --lrate="C:l:n"
Eg. C:0.08:15
run n iterations with lrate = l unchanged
newbob
--lrate="D:l:c:dh,ds:n"
Eg. D:0.08:0.5:0.05,0.05:8
starts with the learning rate l; if the validation error reduction between two consecutive epochs is less than dh, the learning rate is scaled by c during each of the remaining epochs. Training finally terminates when the validation error reduction between two consecutive epochs falls below ds. n is the minimum epoch number after which scaling can be performed.
min-rate newbob
--lrate="MD:l:c:dh,ml:n"
Eg. MD:0.08:0.5:0.05,0.0002:8
the same as newbob, except that training terminates when the learning rate value falls below ml
fixed newbob
--lrate="FD:l:c:eh,es"
Eg. FD:0.08:0.5:10,6 starts with the learning rate l; after eh epochs, the learning rate starts to be scaled by c. Training terminates after doing another es epochs once scaling has started.
'''
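# Illustrative sketch only (not part of dlnn): the helper below just documents how a
# newbob-style spec such as the 'D:1:0.5:0.05,0.005:<n>' string built above is laid out.
# The real parsing happens inside dlnn.utils.learn_rates; the function name and the
# returned field names here are hypothetical, and nothing in this script calls it.
def _describe_newbob_spec(spec):
    # spec has the form 'D:l:c:dh,ds:n' as described in the docstring above
    scheme, start_lr, scale, thresholds, min_epoch = spec.split(':')
    decay_start, stop = thresholds.split(',')
    return {'scheme': scheme,                   # 'D' selects the newbob schedule
            'start_lr': float(start_lr),        # l: initial learning rate
            'scale': float(scale),              # c: factor applied once decay starts
            'decay_start': float(decay_start),  # dh: error-reduction threshold that triggers scaling
            'stop': float(stop),                # ds: error-reduction threshold that stops training
            'min_epoch': int(min_epoch)}        # n: minimum epochs before scaling may begin
# e.g. _describe_newbob_spec('D:1:0.5:0.05,0.005:15')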
filename = settings['filename']
file_obj = FileOperator(filename)
ddis = file_obj.readStripLines()
import logging
import time
current_date = time.strftime("%m_%d_%Y")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logname = 'log_DL_contact_matrix_load' + current_date + '.log'
handler = logging.FileHandler(logname)
handler.setLevel(logging.DEBUG)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
logger.info('Input DDI file: ' + filename)
#logger.debug('This message should go to the log file')
for key, value in cfg.items():
logger.info(key +': '+ str(value))
class DDI_family_base(object):
#def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/home/du/Documents/Vectors_Fishers_aaIndex_raw_2014/'):
#def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/home/sun/Downloads/contactmatrix/contactmatrixanddeeplearningcode/data_test/'):
#def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/home/du/Documents/Vectors_Fishers_aaIndex_raw_2014_paper/'):
def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/big/du/Protein_Protein_Interaction_Project/Contact_Matrix_Project/Vectors_Fishers_aaIndex_raw_2014_paper/'):
""" get total number of sequences in a ddi familgy
Attributes:
ddi: string ddi name
Vectors_Fishers_aaIndex_raw_folder: string, folder
total_number_of_sequences: int
raw_data: dict raw_data[2]
LOO_data['FisherM1'][1]
"""
self.ddi = ddi
self.Vectors_Fishers_aaIndex_raw_folder = Vectors_Fishers_aaIndex_raw_folder
self.ddi_folder = self.Vectors_Fishers_aaIndex_raw_folder + ddi + '/'
self.total_number_of_sequences = self.get_total_number_of_sequences()
self.raw_data = {}
self.positve_negative_number = {}
self.equal_size_data = {}
for seq_no in range(1, self.total_number_of_sequences+1):
self.raw_data[seq_no] = self.get_raw_data_for_selected_seq(seq_no)
try:
#positive_file = self.ddi_folder + 'numPos_'+ str(seq_no) + '.txt'
#file_obj = FileOperator(positive_file)
#lines = file_obj.readStripLines()
#import pdb; pdb.set_trace()
count_pos = int(np.sum(self.raw_data[seq_no][:, -1]))
count_neg = self.raw_data[seq_no].shape[0] - count_pos
#self.positve_negative_number[seq_no] = {'numPos': int(float(lines[0]))}
#assert int(float(lines[0])) == count_pos
self.positve_negative_number[seq_no] = {'numPos': count_pos}
#negative_file = self.ddi_folder + 'numNeg_'+ str(seq_no) + '.txt'
#file_obj = FileOperator(negative_file)
#lines = file_obj.readStripLines()
#self.positve_negative_number[seq_no]['numNeg'] = int(float(lines[0]))
self.positve_negative_number[seq_no]['numNeg'] = count_neg
except Exception,e:
print ddi, seq_no
print str(e)
logger.info(ddi + str(seq_no))
logger.info(str(e))
# get data for equal positive and negative
n_pos = self.positve_negative_number[seq_no]['numPos']
n_neg = self.positve_negative_number[seq_no]['numNeg']
index_neg = range(n_pos, n_pos + n_neg)
random.shuffle(index_neg)
index_neg = index_neg[: n_pos]
positive_examples = self.raw_data[seq_no][ : n_pos, :]
negative_examples = self.raw_data[seq_no][index_neg, :]
self.equal_size_data[seq_no] = np.vstack((positive_examples, negative_examples))
def get_LOO_training_and_reduced_traing(self, seq_no, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
""" get the leave one out traing data, reduced traing
Parameters:
seq_no:
fisher_mode: default 'FisherM1ONLY'
Returns:
(train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y)
"""
train_X_LOO = np.array([])
train_y_LOO = np.array([])
train_X_reduced = np.array([])
train_y_reduced = np.array([])
total_number_of_sequences = self.total_number_of_sequences
equal_size_data_selected_sequence = self.equal_size_data[seq_no]
#get test data for selected sequence
test_X, test_y = self.select_X_y(equal_size_data_selected_sequence, fisher_mode = fisher_mode)
total_sequences = range(1, total_number_of_sequences+1)
loo_sequences = [i for i in total_sequences if i != seq_no]
number_of_reduced = len(loo_sequences)/reduce_ratio if len(loo_sequences)/reduce_ratio !=0 else 1
random.shuffle(loo_sequences)
reduced_sequences = loo_sequences[:number_of_reduced]
#for loo data
for current_no in loo_sequences:
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_LOO.ndim ==1:
train_X_LOO = current_X
else:
train_X_LOO = np.vstack((train_X_LOO, current_X))
train_y_LOO = np.concatenate((train_y_LOO, current_y))
#for reduced data
for current_no in reduced_sequences:
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_reduced.ndim ==1:
train_X_reduced = current_X
else:
train_X_reduced = np.vstack((train_X_reduced, current_X))
train_y_reduced = np.concatenate((train_y_reduced, current_y))
return (train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y)
#def get_ten_fold_crossvalid_one_subset(self, start_subset, end_subset, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
def get_ten_fold_crossvalid_one_subset(self, train_index, test_index, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
""" get traing data, reduced traing data for 10-fold crossvalidation
Parameters:
start_subset: index of start of the testing data
end_subset: index of end of the testing data
fisher_mode: default 'FisherM1ONLY'
Returns:
(train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y)
"""
train_X_10fold = np.array([])
train_y_10fold = np.array([])
train_X_reduced = np.array([])
train_y_reduced = np.array([])
test_X = np.array([])
test_y = np.array([])
total_number_of_sequences = self.total_number_of_sequences
#get test data for selected sequence
#for current_no in range(start_subset, end_subset):
for num in test_index:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if test_X.ndim ==1:
test_X = current_X
else:
test_X = np.vstack((test_X, current_X))
test_y = np.concatenate((test_y, current_y))
#total_sequences = range(1, total_number_of_sequences+1)
#ten_fold_sequences = [i for i in total_sequences if not(i in range(start_subset, end_subset))]
#number_of_reduced = len(ten_fold_sequences)/reduce_ratio if len(ten_fold_sequences)/reduce_ratio !=0 else 1
#random.shuffle(ten_fold_sequences)
#reduced_sequences = ten_fold_sequences[:number_of_reduced]
number_of_reduced = len(train_index)/reduce_ratio if len(train_index)/reduce_ratio !=0 else 1
random.shuffle(train_index)
reduced_sequences = train_index[:number_of_reduced]
#for 10-fold cross-validation data
#for current_no in ten_fold_sequences:
for num in train_index:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_10fold.ndim ==1:
train_X_10fold = current_X
else:
train_X_10fold = np.vstack((train_X_10fold, current_X))
train_y_10fold = np.concatenate((train_y_10fold, current_y))
#for reduced data
for num in reduced_sequences:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_reduced.ndim ==1:
train_X_reduced = current_X
else:
train_X_reduced = np.vstack((train_X_reduced, current_X))
train_y_reduced = np.concatenate((train_y_reduced, current_y))
return (train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y)
def get_total_number_of_sequences(self):
""" get total number of sequences in a ddi familgy
Parameters:
ddi: string
Vectors_Fishers_aaIndex_raw_folder: string
Returns:
n: int
"""
folder_path = self.Vectors_Fishers_aaIndex_raw_folder + self.ddi + '/'
filename = folder_path +'allPairs.txt'
all_pairs = np.loadtxt(filename)
return len(all_pairs)
def get_raw_data_for_selected_seq(self, seq_no):
""" get raw data for selected seq no in a family
Parameters:
ddi:
seq_no:
Returns:
data: raw data in the sequence file
"""
folder_path = self.Vectors_Fishers_aaIndex_raw_folder + self.ddi + '/'
filename = folder_path + 'F0_20_F1_20_Sliding_17_11_F0_20_F1_20_Sliding_17_11_ouput_'+ str(seq_no) + '.txt'
data = np.loadtxt(filename, dtype = theano.config.floatX)
return data
def select_X_y(self, data, fisher_mode = ''):
""" select subset from the raw input data set
Parameters:
data: data from matlab txt file
fisher_mode: subset base on this Fisher of AAONLY...
Returns:
selected X, y
"""
        y = data[:,-1] # get label
if fisher_mode == 'FisherM1': # fisher m1 plus AA index
a = data[:, 20:227]
b = data[:, 247:454]
X = np.hstack((a,b))
elif fisher_mode == 'FisherM1ONLY':
a = data[:, 20:40]
b = data[:, 247:267]
X = np.hstack((a,b))
elif fisher_mode == 'AAONLY':
a = data[:, 40:227]
b = data[:, 267:454]
X = np.hstack((a,b))
else:
            raise ValueError('there is an error in mode')
return X, y
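# Hedged usage sketch for DDI_family_base (comments only, nothing is executed; the DDI
# family name and the index lists below are hypothetical examples):
#
#   family = DDI_family_base('PF00001_PF00002')
#   (X_tr, y_tr), (X_red, y_red), (X_te, y_te) = family.get_ten_fold_crossvalid_one_subset(
#       train_index=range(0, 8), test_index=range(8, 10),
#       fisher_mode='FisherM1ONLY', reduce_ratio=1)
#
# 'FisherM1ONLY' keeps columns 20:40 and 247:267 of the raw vectors, 'FisherM1' keeps the
# Fisher scores together with the AA-index blocks, and 'AAONLY' keeps only the AA-index
# blocks (see select_X_y above).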
# In[7]:
# In[7]:
# In[8]:
import sklearn.preprocessing
def saveAsCsv(with_auc_score, fname, score_dict, arguments): #new
newfile = False
if os.path.isfile('report_' + fname + '.csv'):
pass
else:
newfile = True
csvfile = open('report_' + fname + '.csv', 'a+')
writer = csv.writer(csvfile)
if newfile == True:
if with_auc_score == False:
writer.writerow(['DDI', 'no.', 'FisherMode', 'method', 'isTest']+ score_dict.keys()) #, 'AUC'])
else:
writer.writerow(['DDI', 'no.', 'FisherMode', 'method', 'isTest'] + score_dict.keys())
for arg in arguments:
writer.writerow([i for i in arg])
csvfile.close()
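# For reference: each tuple appended to analysis_scr below has the layout
#   (ddi, subset_no, fisher_mode, method, isTest, score_1, ..., score_k)
# where the trailing scores come from performance_score(...).values(), so the CSV that
# saveAsCsv writes has the columns
#   DDI, no., FisherMode, method, isTest, <score_dict keys>
# (the exact score names depend on performance_score in DL_libs and are not fixed here).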
#for 10-fold cross validation
def ten_fold_crossvalid_performance_for_all(ddis):
for ddi in ddis:
try:
process_one_ddi_tenfold(ddi)
except Exception,e:
print str(e)
logger.debug("There is a error in this ddi: %s" % ddi)
logger.info(str(e))
def process_one_ddi_tenfold(ddi):
"""A function to waste CPU cycles"""
logger.info('DDI: %s' % ddi)
try:
one_ddi_family = {}
one_ddi_family[ddi] = Ten_fold_crossvalid_performance_for_one_ddi(ddi)
        one_ddi_family[ddi].get_ten_fold_crossvalid_performance(settings=cfg)
except Exception,e:
print str(e)
logger.debug("There is a error in this ddi: %s" % ddi)
logger.info(str(e))
return None
class Ten_fold_crossvalid_performance_for_one_ddi(object):
""" get the performance of ddi families
Attributes:
ddi: string ddi name
Vectors_Fishers_aaIndex_raw_folder: string, folder
total_number_of_sequences: int
raw_data: dict raw_data[2]
"""
def __init__(self, ddi):
self.ddi_obj = DDI_family_base(ddi)
self.ddi = ddi
    def get_ten_fold_crossvalid_performance(self, settings = None):
fisher_mode = settings['fisher_mode']
analysis_scr = []
with_auc_score = settings['with_auc_score']
reduce_ratio = settings['reduce_ratio']
#for seq_no in range(1, self.ddi_obj.total_number_of_sequences+1):
#subset_size = math.floor(self.ddi_obj.total_number_of_sequences / 10.0)
kf = KFold(self.ddi_obj.total_number_of_sequences, n_folds = 10, shuffle = True)
#for subset_no in range(1, 11):
for ((train_index, test_index),subset_no) in izip(kf,range(1,11)):
#for train_index, test_index in kf;
print("Subset:", subset_no)
print("Train index: ", train_index)
print("Test index: ", test_index)
#logger.info('subset number: ' + str(subset_no))
(train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y) = self.ddi_obj.get_ten_fold_crossvalid_one_subset(train_index, test_index, fisher_mode = fisher_mode, reduce_ratio = reduce_ratio)
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
if settings['SVM']:
print "SVM"
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(scaled_train_X, train_y_reduced)
predicted_test_y = Linear_SVC.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_RBF']:
print "SVM_RBF"
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(scaled_train_X, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_POLY']:
print "SVM_POLY"
L1_SVC_POLY_Selector = SVC(C=1, kernel='poly').fit(scaled_train_X, train_y_reduced)
predicted_test_y = L1_SVC_POLY_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_POLY_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
min_max_scaler = Preprocessing_Scaler_with_mean_point5()
X_train_pre_validation_minmax = min_max_scaler.fit(train_X_reduced)
X_train_pre_validation_minmax = min_max_scaler.transform(train_X_reduced)
x_test_minmax = min_max_scaler.transform(test_X)
x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax = train_test_split(X_train_pre_validation_minmax,
train_y_reduced
, test_size=0.4, random_state=42)
finetune_lr = settings['finetune_lr']
batch_size = settings['batch_size']
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = settings['pretrain_lr']
training_epochs = cal_epochs(settings['training_interations'], x_train_minmax, batch_size = batch_size)
hidden_layers_sizes= settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
settings['epoch_number'] = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
# deep xy autoencoders
settings['lrate'] = settings['lrate_pre'] + str(training_epochs)
settings['n_ins'] = x_train_minmax.shape[1]
if settings['DL_xy']:
cfg = settings.copy()
cfg['weight_y'] = 1
print 'DL_xy'
train_x = x_train_minmax; train_y = y_train_minmax
sdaf = Sda_xy_factory(cfg)
sdaf.sda.pretraining(train_x, train_y)
dnnf = DNN_factory(cfg)
dnnf.dnn.load_pretrain_from_Sda(sdaf.sda)
dnnf.dnn.finetuning((x_train_minmax, y_train_minmax),(x_validation_minmax, y_validation_minmax))
training_predicted = dnnf.dnn.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_xy', isTest) + tuple(performance_score(y_train, training_predicted).values()))
test_predicted = dnnf.dnn.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_xy', isTest) + tuple(performance_score(y_test, test_predicted).values()))
if settings['Sda_xy_with_first']:
cfg = settings.copy()
cfg['weight_y'] = 10
cfg['firstlayer_xy'] = 1
print 'firstlayer_xy'
train_x = x_train_minmax; train_y = y_train_minmax
sdaf = Sda_xy_factory(cfg)
sdaf.sda.pretraining(train_x, train_y)
dnnf = DNN_factory(cfg)
dnnf.dnn.load_pretrain_from_Sda(sdaf.sda)
dnnf.dnn.finetuning((x_train_minmax, y_train_minmax),(x_validation_minmax, y_validation_minmax))
training_predicted = dnnf.dnn.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'Sda_xy_with_first', isTest) + tuple(performance_score(y_train, training_predicted).values()))
test_predicted = dnnf.dnn.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'Sda_xy_with_first', isTest) + tuple(performance_score(y_test, test_predicted).values()))
if settings['Sda_new']:
print 'Sda_new'
cfg = settings.copy()
train_x = x_train_minmax; train_y = y_train_minmax
cfg['n_ins'] = train_x.shape[1]
sdaf = Sda_factory(cfg)
sda = sdaf.sda.pretraining(train_x = train_x)
sdaf.dnn.finetuning((x_train_minmax, y_train_minmax),(x_validation_minmax, y_validation_minmax))
training_predicted = sdaf.dnn.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'Sda_new', isTest) + tuple(performance_score(y_train, training_predicted).values()))
test_predicted = sdaf.dnn.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'Sda_new', isTest) + tuple(performance_score(y_test, test_predicted).values()))
#### new prepresentation
x = X_train_pre_validation_minmax
a_MAE_A = pretrain_a_Sda_with_estop(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(X_train_pre_validation_minmax)
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax)
standard_scaler = preprocessing.StandardScaler().fit(new_x_train_minmax_A)
new_x_train_scaled = standard_scaler.transform(new_x_train_minmax_A)
new_x_test_scaled = standard_scaler.transform(new_x_test_minmax_A)
new_x_train_combo = np.hstack((scaled_train_X, new_x_train_scaled))
new_x_test_combo = np.hstack((scaled_test_X, new_x_test_scaled))
if settings['SAE_SVM']:
print 'SAE followed by SVM'
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_RBF']:
print 'SAE followed by SVM RBF'
x = X_train_pre_validation_minmax
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_COMBO']:
print 'SAE followed by SVM with combo feature'
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_combo, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_combo)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_COMBO', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_combo)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_COMBO', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_RBF_COMBO']:
print 'SAE followed by SVM RBF with combo feature'
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_combo, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_combo)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_RBF_COMBO', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_combo)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_RBF_COMBO', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['DL']:
print "direct deep learning"
sda = train_a_Sda(x_train_minmax, pretrain_lr, finetune_lr,
y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs, n_outs = settings['n_outs']
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
test_predicted = sda.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
if settings['DL_old']:
print "direct deep learning old without early stop"
sda = trainSda(x_train_minmax, y_train,
x_validation_minmax, y_validation_minmax,
x_test_minmax, y_test,pretrain_lr, finetune_lr,
pretraining_X_minmax=None,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs, n_outs = settings['n_outs']
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_old', isTest) + tuple(performance_score(y_train, training_predicted).values()))
test_predicted = sda.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_old', isTest) + tuple(performance_score(y_test, test_predicted).values()))
if settings['DL_U']:
# deep learning using unlabeled data for pretraining
print 'deep learning with unlabel data'
pretraining_X_minmax = min_max_scaler.transform(train_X_10fold)
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
sda_unlabel = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
pretraining_X_minmax = pretraining_X_minmax,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr, n_outs = settings['n_outs']
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_unlabel.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
test_predicted = sda_unlabel.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
if settings['DL_S']:
# deep learning using split network
y_test = test_y
print 'deep learning using split network'
# get the new representation for A set. first 784-D
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
print "original shape for A", x.shape
a_MAE_A = pretrain_a_Sda_with_estop(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
print "original shape for B", x.shape
a_MAE_B = pretrain_a_Sda_with_estop(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
sda_transformed = train_a_Sda(new_x_train_minmax_whole, pretrain_lr, finetune_lr,
y_train_minmax,
new_x_validationt_minmax_whole, y_validation_minmax ,
new_x_test_minmax_whole, y_test,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs, n_outs = settings['n_outs']
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
test_predicted = sda_transformed.predict(new_x_test_minmax_whole)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
if settings['DL_S_new']:
# deep learning using split network
print 'new deep learning using split network'
cfg = settings.copy()
p_sda = Parellel_Sda_factory(cfg)
p_sda.supervised_training(x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax)
isTest = False #new
training_predicted = p_sda.predict(x_train_minmax)
y_train = y_train_minmax
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S_new', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
isTest = True #new
y_test = test_y
test_predicted = p_sda.predict(x_test_minmax)
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S_new', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
if settings['DL_S_new_contraction']:
print 'DL_S_new_contraction'
cfg = settings.copy()
cfg['contraction_level'] = 0.1
p_sda = Parellel_Sda_factory(cfg)
p_sda.supervised_training(x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax)
isTest = False #new
training_predicted = p_sda.predict(x_train_minmax)
y_train = y_train_minmax
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S_new_contraction', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
isTest = True #new
y_test = test_y
test_predicted = p_sda.predict(x_test_minmax)
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S_new_contraction', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
if settings['DL_S_new_sparsity'] == 1:
print 'DL_S_new_sparsity'
cfg = settings.copy()
cfg['sparsity'] = 0.1
cfg['sparsity_weight'] = 0.1
p_sda = Parellel_Sda_factory(cfg)
p_sda.supervised_training(x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax)
isTest = False #new
training_predicted = p_sda.predict(x_train_minmax)
y_train = y_train_minmax
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S_new_sparsity', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
isTest = True #new
y_test = test_y
test_predicted = p_sda.predict(x_test_minmax)
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S_new_sparsity', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
if settings['DL_S_new_weight_decay'] == 2:
cfg = settings.copy()
cfg['l2_reg'] =0.0001
print 'l2_reg'
p_sda = Parellel_Sda_factory(cfg)
p_sda.supervised_training(x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax)
isTest = False #new
training_predicted = p_sda.predict(x_train_minmax)
y_train = y_train_minmax
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'l2_reg', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
isTest = True #new
y_test = test_y
test_predicted = p_sda.predict(x_test_minmax)
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'l2_reg', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
if settings['DL_S_new_weight_decay'] == 1:
print 'l1_reg'
cfg = settings.copy()
cfg['l1_reg'] =0.1
p_sda = Parellel_Sda_factory(cfg)
p_sda.supervised_training(x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax)
isTest = False #new
training_predicted = p_sda.predict(x_train_minmax)
y_train = y_train_minmax
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'l1_reg', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
isTest = True #new
y_test = test_y
test_predicted = p_sda.predict(x_test_minmax)
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'l1_reg', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
if settings['DL_S_new_Drop_out'] == 1:
cfg = settings.copy()
cfg['dropout_factor'] = 0.3
print 'DL_S_new_Drop_out'
p_sda = Parellel_Sda_factory(cfg)
p_sda.supervised_training(x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax)
isTest = False #new
training_predicted = p_sda.predict(x_train_minmax)
y_train = y_train_minmax
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S_new_Drop_out', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
isTest = True #new
y_test = test_y
test_predicted = p_sda.predict(x_test_minmax)
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S_new_Drop_out', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
report_name = filename + '_' + '_newDL_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + str(settings['training_interations']) + '_' + current_date
saveAsCsv(with_auc_score, report_name, performance_score(test_y, predicted_test_y, with_auc_score), analysis_scr)
# In[10]:
#LOO_out_performance_for_all(ddis)
#LOO_out_performance_for_all(ddis)
#'''
from multiprocessing import Pool
pool = Pool(settings['n_cores'])
pool.map(process_one_ddi_tenfold, ddis[:20])
pool.close()
pool.join()
#'''
#process_one_ddi_tenfold(ddis[1])
# In[25]:
x = logging._handlers.copy()
for i in x:
logger.removeHandler(i)
i.flush()
i.close()
| gpl-2.0 |
tylerjereddy/scipy | scipy/cluster/hierarchy.py | 12 | 148459 | """
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
cut_tree
optimal_leaf_ordering
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
Utility classes:
.. autosummary::
:toctree: generated/
DisjointSet -- data structure for incremental connectivity queries
"""
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import bisect
from collections import deque
import numpy as np
from . import _hierarchy, _optimal_leaf_ordering
import scipy.spatial.distance as distance
from scipy._lib._disjoint_set import DisjointSet
_LINKAGE_METHODS = {'single': 0, 'complete': 1, 'average': 2, 'centroid': 3,
'median': 4, 'ward': 5, 'weighted': 6}
_EUCLIDEAN_METHODS = ('centroid', 'median', 'ward')
__all__ = ['ClusterNode', 'DisjointSet', 'average', 'centroid', 'complete',
'cophenet', 'correspond', 'cut_tree', 'dendrogram', 'fcluster',
'fclusterdata', 'from_mlab_linkage', 'inconsistent',
'is_isomorphic', 'is_monotonic', 'is_valid_im', 'is_valid_linkage',
'leaders', 'leaves_list', 'linkage', 'maxRstat', 'maxdists',
'maxinconsts', 'median', 'num_obs_linkage', 'optimal_leaf_ordering',
'set_link_color_palette', 'single', 'to_mlab_linkage', 'to_tree',
'ward', 'weighted', 'distance']
class ClusterWarning(UserWarning):
pass
def _warning(s):
warnings.warn('scipy.cluster: %s' % s, ClusterWarning, stacklevel=3)
def _copy_array_if_base_present(a):
"""
Copy the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accept a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
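    For example (an illustrative sketch), a strided view of a larger array
    is replaced by a contiguous copy:
    >>> a = np.arange(10.)[::2]  # a view: ``a.base`` is not None
    >>> b, = _copy_arrays_if_base_present([a])
    >>> b.base is None
    True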
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _randdm(pnts):
"""
Generate a random distance matrix stored in condensed form.
Parameters
----------
pnts : int
The number of points in the distance matrix. Has to be at least 2.
Returns
-------
D : ndarray
A ``pnts * (pnts - 1) / 2`` sized vector is returned.
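    Examples
    --------
    A small illustrative sketch; for 5 points, the condensed form has
    ``5 * 4 / 2 == 10`` entries:
    >>> D = _randdm(5)
    >>> D.shape
    (10,)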
"""
if pnts >= 2:
        D = np.random.rand(pnts * (pnts - 1) // 2)
else:
raise ValueError("The number of points in the distance matrix "
"must be at least 2.")
return D
def single(y):
"""
Perform single/min/nearest linkage on the condensed distance matrix ``y``.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
The linkage matrix.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
scipy.spatial.distance.pdist : pairwise distance metrics
Examples
--------
>>> from scipy.cluster.hierarchy import single, fcluster
>>> from scipy.spatial.distance import pdist
First, we need a toy dataset to play with::
x x x x
x x
x x
x x x x
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
Then, we get a condensed distance matrix from this dataset:
>>> y = pdist(X)
Finally, we can perform the clustering:
>>> Z = single(y)
>>> Z
array([[ 0., 1., 1., 2.],
[ 2., 12., 1., 3.],
[ 3., 4., 1., 2.],
[ 5., 14., 1., 3.],
[ 6., 7., 1., 2.],
[ 8., 16., 1., 3.],
[ 9., 10., 1., 2.],
[11., 18., 1., 3.],
[13., 15., 2., 6.],
[17., 20., 2., 9.],
[19., 21., 2., 12.]])
The linkage matrix ``Z`` represents a dendrogram - see
`scipy.cluster.hierarchy.linkage` for a detailed explanation of its
contents.
We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
each initial point would belong given a distance threshold:
>>> fcluster(Z, 0.9, criterion='distance')
array([ 7, 8, 9, 10, 11, 12, 4, 5, 6, 1, 2, 3], dtype=int32)
>>> fcluster(Z, 1, criterion='distance')
array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)
>>> fcluster(Z, 2, criterion='distance')
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
plot of the dendrogram.
"""
return linkage(y, method='single', metric='euclidean')
def complete(y):
"""
Perform complete/max/farthest point linkage on a condensed distance matrix.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the `linkage` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
scipy.spatial.distance.pdist : pairwise distance metrics
Examples
--------
>>> from scipy.cluster.hierarchy import complete, fcluster
>>> from scipy.spatial.distance import pdist
First, we need a toy dataset to play with::
x x x x
x x
x x
x x x x
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
Then, we get a condensed distance matrix from this dataset:
>>> y = pdist(X)
Finally, we can perform the clustering:
>>> Z = complete(y)
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.41421356, 3. ],
[ 5. , 13. , 1.41421356, 3. ],
[ 8. , 14. , 1.41421356, 3. ],
[11. , 15. , 1.41421356, 3. ],
[16. , 17. , 4.12310563, 6. ],
[18. , 19. , 4.12310563, 6. ],
[20. , 21. , 5.65685425, 12. ]])
The linkage matrix ``Z`` represents a dendrogram - see
`scipy.cluster.hierarchy.linkage` for a detailed explanation of its
contents.
We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
each initial point would belong given a distance threshold:
>>> fcluster(Z, 0.9, criterion='distance')
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32)
>>> fcluster(Z, 1.5, criterion='distance')
array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
>>> fcluster(Z, 4.5, criterion='distance')
array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32)
>>> fcluster(Z, 6, criterion='distance')
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
plot of the dendrogram.
"""
return linkage(y, method='complete', metric='euclidean')
def average(y):
"""
Perform average/UPGMA linkage on a condensed distance matrix.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
`linkage` for more information on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
scipy.spatial.distance.pdist : pairwise distance metrics
Examples
--------
>>> from scipy.cluster.hierarchy import average, fcluster
>>> from scipy.spatial.distance import pdist
First, we need a toy dataset to play with::
x x x x
x x
x x
x x x x
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
Then, we get a condensed distance matrix from this dataset:
>>> y = pdist(X)
Finally, we can perform the clustering:
>>> Z = average(y)
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.20710678, 3. ],
[ 5. , 13. , 1.20710678, 3. ],
[ 8. , 14. , 1.20710678, 3. ],
[11. , 15. , 1.20710678, 3. ],
[16. , 17. , 3.39675184, 6. ],
[18. , 19. , 3.39675184, 6. ],
[20. , 21. , 4.09206523, 12. ]])
The linkage matrix ``Z`` represents a dendrogram - see
`scipy.cluster.hierarchy.linkage` for a detailed explanation of its
contents.
We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
each initial point would belong given a distance threshold:
>>> fcluster(Z, 0.9, criterion='distance')
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32)
>>> fcluster(Z, 1.5, criterion='distance')
array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
>>> fcluster(Z, 4, criterion='distance')
array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32)
>>> fcluster(Z, 6, criterion='distance')
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
plot of the dendrogram.
"""
return linkage(y, method='average', metric='euclidean')
def weighted(y):
"""
Perform weighted/WPGMA linkage on the condensed distance matrix.
See `linkage` for more information on the return
structure and algorithm.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
`linkage` for more information on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
scipy.spatial.distance.pdist : pairwise distance metrics
Examples
--------
>>> from scipy.cluster.hierarchy import weighted, fcluster
>>> from scipy.spatial.distance import pdist
First, we need a toy dataset to play with::
x x x x
x x
x x
x x x x
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
Then, we get a condensed distance matrix from this dataset:
>>> y = pdist(X)
Finally, we can perform the clustering:
>>> Z = weighted(y)
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 9. , 11. , 1. , 2. ],
[ 2. , 12. , 1.20710678, 3. ],
[ 8. , 13. , 1.20710678, 3. ],
[ 5. , 14. , 1.20710678, 3. ],
[10. , 15. , 1.20710678, 3. ],
[18. , 19. , 3.05595762, 6. ],
[16. , 17. , 3.32379407, 6. ],
[20. , 21. , 4.06357713, 12. ]])
The linkage matrix ``Z`` represents a dendrogram - see
`scipy.cluster.hierarchy.linkage` for a detailed explanation of its
contents.
We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
each initial point would belong given a distance threshold:
>>> fcluster(Z, 0.9, criterion='distance')
array([ 7, 8, 9, 1, 2, 3, 10, 11, 12, 4, 6, 5], dtype=int32)
>>> fcluster(Z, 1.5, criterion='distance')
array([3, 3, 3, 1, 1, 1, 4, 4, 4, 2, 2, 2], dtype=int32)
>>> fcluster(Z, 4, criterion='distance')
array([2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 1, 1], dtype=int32)
>>> fcluster(Z, 6, criterion='distance')
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
plot of the dendrogram.
"""
return linkage(y, method='weighted', metric='euclidean')
def centroid(y):
"""
Perform centroid/UPGMC linkage.
See `linkage` for more information on the input matrix,
return structure, and algorithm.
The following are common calling conventions:
1. ``Z = centroid(y)``
Performs centroid/UPGMC linkage on the condensed distance
matrix ``y``.
2. ``Z = centroid(X)``
Performs centroid/UPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric.
Parameters
----------
y : ndarray
A condensed distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
an m by n array.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the `linkage` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
scipy.spatial.distance.pdist : pairwise distance metrics
Examples
--------
>>> from scipy.cluster.hierarchy import centroid, fcluster
>>> from scipy.spatial.distance import pdist
First, we need a toy dataset to play with::
x x x x
x x
x x
x x x x
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
Then, we get a condensed distance matrix from this dataset:
>>> y = pdist(X)
Finally, we can perform the clustering:
>>> Z = centroid(y)
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 2. , 12. , 1.11803399, 3. ],
[ 5. , 13. , 1.11803399, 3. ],
[ 8. , 15. , 1.11803399, 3. ],
[11. , 14. , 1.11803399, 3. ],
[18. , 19. , 3.33333333, 6. ],
[16. , 17. , 3.33333333, 6. ],
[20. , 21. , 3.33333333, 12. ]])
The linkage matrix ``Z`` represents a dendrogram - see
`scipy.cluster.hierarchy.linkage` for a detailed explanation of its
contents.
We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
each initial point would belong given a distance threshold:
>>> fcluster(Z, 0.9, criterion='distance')
array([ 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6], dtype=int32)
>>> fcluster(Z, 1.1, criterion='distance')
array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32)
>>> fcluster(Z, 2, criterion='distance')
array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32)
>>> fcluster(Z, 4, criterion='distance')
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
plot of the dendrogram.
"""
return linkage(y, method='centroid', metric='euclidean')
def median(y):
"""
Perform median/WPGMC linkage.
See `linkage` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = median(y)``
Performs median/WPGMC linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = median(X)``
Performs median/WPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See `linkage`
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
an m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
scipy.spatial.distance.pdist : pairwise distance metrics
Examples
--------
>>> from scipy.cluster.hierarchy import median, fcluster
>>> from scipy.spatial.distance import pdist
First, we need a toy dataset to play with::
x x x x
x x
x x
x x x x
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
Then, we get a condensed distance matrix from this dataset:
>>> y = pdist(X)
Finally, we can perform the clustering:
>>> Z = median(y)
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 2. , 12. , 1.11803399, 3. ],
[ 5. , 13. , 1.11803399, 3. ],
[ 8. , 15. , 1.11803399, 3. ],
[11. , 14. , 1.11803399, 3. ],
[18. , 19. , 3. , 6. ],
[16. , 17. , 3.5 , 6. ],
[20. , 21. , 3.25 , 12. ]])
The linkage matrix ``Z`` represents a dendrogram - see
`scipy.cluster.hierarchy.linkage` for a detailed explanation of its
contents.
We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
each initial point would belong given a distance threshold:
>>> fcluster(Z, 0.9, criterion='distance')
array([ 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6], dtype=int32)
>>> fcluster(Z, 1.1, criterion='distance')
array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32)
>>> fcluster(Z, 2, criterion='distance')
array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32)
>>> fcluster(Z, 4, criterion='distance')
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
plot of the dendrogram.
"""
return linkage(y, method='median', metric='euclidean')
def ward(y):
"""
Perform Ward's linkage on a condensed distance matrix.
See `linkage` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = ward(y)``
Performs Ward's linkage on the condensed distance matrix ``y``.
2. ``Z = ward(X)``
Performs Ward's linkage on the observation matrix ``X`` using
Euclidean distance as the distance metric.
Parameters
----------
y : ndarray
A condensed distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
an m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix. See
`linkage` for more information on the return structure and
algorithm.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
scipy.spatial.distance.pdist : pairwise distance metrics
Examples
--------
>>> from scipy.cluster.hierarchy import ward, fcluster
>>> from scipy.spatial.distance import pdist
First, we need a toy dataset to play with::
x x x x
x x
x x
x x x x
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
Then, we get a condensed distance matrix from this dataset:
>>> y = pdist(X)
Finally, we can perform the clustering:
>>> Z = ward(y)
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.29099445, 3. ],
[ 5. , 13. , 1.29099445, 3. ],
[ 8. , 14. , 1.29099445, 3. ],
[11. , 15. , 1.29099445, 3. ],
[16. , 17. , 5.77350269, 6. ],
[18. , 19. , 5.77350269, 6. ],
[20. , 21. , 8.16496581, 12. ]])
The linkage matrix ``Z`` represents a dendrogram - see
`scipy.cluster.hierarchy.linkage` for a detailed explanation of its
contents.
We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
each initial point would belong given a distance threshold:
>>> fcluster(Z, 0.9, criterion='distance')
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32)
>>> fcluster(Z, 1.1, criterion='distance')
array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32)
>>> fcluster(Z, 3, criterion='distance')
array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
>>> fcluster(Z, 9, criterion='distance')
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a
plot of the dendrogram.
"""
return linkage(y, method='ward', metric='euclidean')
def linkage(y, method='single', metric='euclidean', optimal_ordering=False):
"""
Perform hierarchical/agglomerative clustering.
The input y may be either a 1-D condensed distance matrix
or a 2-D array of observation vectors.
If y is a 1-D condensed distance matrix,
then y must be a :math:`\\binom{n}{2}` sized
vector, where n is the number of original observations paired
in the distance matrix. The behavior of this function is very
similar to the MATLAB linkage function.
A :math:`(n-1)` by 4 matrix ``Z`` is returned. At the
:math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
cluster with an index less than :math:`n` corresponds to one of
the :math:`n` original observations. The distance between
clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
fourth value ``Z[i, 3]`` represents the number of original
observations in the newly formed cluster.
The following linkage methods are used to compute the distance
:math:`d(s, t)` between two clusters :math:`s` and
:math:`t`. The algorithm begins with a forest of clusters that
have yet to be used in the hierarchy being formed. When two
clusters :math:`s` and :math:`t` from this forest are combined
into a single cluster :math:`u`, :math:`s` and :math:`t` are
removed from the forest, and :math:`u` is added to the
forest. When only one cluster remains in the forest, the algorithm
stops, and this cluster becomes the root.
A distance matrix is maintained at each iteration. The ``d[i,j]``
entry corresponds to the distance between cluster :math:`i` and
:math:`j` in the original forest.
At each iteration, the algorithm must update the distance matrix
to reflect the distance of the newly formed cluster u with the
remaining clusters in the forest.
Suppose there are :math:`|u|` original observations
:math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
:math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
cluster :math:`v`. Recall, :math:`s` and :math:`t` are
combined to form cluster :math:`u`. Let :math:`v` be any
remaining cluster in the forest that is not :math:`u`.
The following are methods for calculating the distance between the
newly formed cluster :math:`u` and each :math:`v`.
* method='single' assigns
.. math::
d(u,v) = \\min(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and
:math:`j` in cluster :math:`v`. This is also known as the
Nearest Point Algorithm.
* method='complete' assigns
.. math::
d(u, v) = \\max(dist(u[i],v[j]))
        for all points :math:`i` in cluster :math:`u` and :math:`j` in
        cluster :math:`v`. This is also known as the Farthest Point
        Algorithm or Voor Hees Algorithm.
* method='average' assigns
.. math::
d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
{(|u|*|v|)}
for all points :math:`i` and :math:`j` where :math:`|u|`
and :math:`|v|` are the cardinalities of clusters :math:`u`
and :math:`v`, respectively. This is also called the UPGMA
algorithm.
* method='weighted' assigns
.. math::
d(u,v) = (dist(s,v) + dist(t,v))/2
        where cluster :math:`u` was formed from clusters :math:`s` and
        :math:`t`, and :math:`v` is a remaining cluster in the forest
        (also called WPGMA).
* method='centroid' assigns
.. math::
dist(s,t) = ||c_s-c_t||_2
where :math:`c_s` and :math:`c_t` are the centroids of
clusters :math:`s` and :math:`t`, respectively. When two
clusters :math:`s` and :math:`t` are combined into a new
cluster :math:`u`, the new centroid is computed over all the
original objects in clusters :math:`s` and :math:`t`. The
distance then becomes the Euclidean distance between the
centroid of :math:`u` and the centroid of a remaining cluster
:math:`v` in the forest. This is also known as the UPGMC
algorithm.
* method='median' assigns :math:`d(s,t)` like the ``centroid``
method. When two clusters :math:`s` and :math:`t` are combined
        into a new cluster :math:`u`, the average of the centroids of
        :math:`s` and :math:`t` gives the new centroid of :math:`u`.
        This is also known as the
WPGMC algorithm.
* method='ward' uses the Ward variance minimization algorithm.
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
Warning: When the minimum distance pair in the forest is chosen, there
may be two or more pairs with the same minimum distance. This
implementation may choose a different minimum than the MATLAB
version.
Parameters
----------
y : ndarray
A condensed distance matrix. A condensed distance matrix
is a flat array containing the upper triangular of the distance matrix.
This is the form that ``pdist`` returns. Alternatively, a collection of
:math:`m` observation vectors in :math:`n` dimensions may be passed as
an :math:`m` by :math:`n` array. All elements of the condensed distance
matrix must be finite, i.e., no NaNs or infs.
method : str, optional
The linkage algorithm to use. See the ``Linkage Methods`` section below
for full descriptions.
metric : str or function, optional
The distance metric to use in the case that y is a collection of
observation vectors; ignored otherwise. See the ``pdist``
function for a list of valid distance metrics. A custom distance
function can also be used.
optimal_ordering : bool, optional
If True, the linkage matrix will be reordered so that the distance
between successive leaves is minimal. This results in a more intuitive
        tree structure when the data are visualized. Defaults to False, because
this algorithm can be slow, particularly on large datasets [2]_. See
also the `optimal_leaf_ordering` function.
.. versionadded:: 1.0.0
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
Notes
-----
1. For method 'single', an optimized algorithm based on minimum spanning
tree is implemented. It has time complexity :math:`O(n^2)`.
For methods 'complete', 'average', 'weighted' and 'ward', an algorithm
called nearest-neighbors chain is implemented. It also has time
complexity :math:`O(n^2)`.
For other methods, a naive algorithm is implemented with :math:`O(n^3)`
time complexity.
All algorithms use :math:`O(n^2)` memory.
Refer to [1]_ for details about the algorithms.
2. Methods 'centroid', 'median', and 'ward' are correctly defined only if
Euclidean pairwise metric is used. If `y` is passed as precomputed
pairwise distances, then it is the user's responsibility to assure that
these distances are in fact Euclidean, otherwise the produced result
will be incorrect.
See Also
--------
scipy.spatial.distance.pdist : pairwise distance metrics
References
----------
.. [1] Daniel Mullner, "Modern hierarchical, agglomerative clustering
algorithms", :arXiv:`1109.2378v1`.
.. [2] Ziv Bar-Joseph, David K. Gifford, Tommi S. Jaakkola, "Fast optimal
leaf ordering for hierarchical clustering", 2001. Bioinformatics
:doi:`10.1093/bioinformatics/17.suppl_1.S22`
Examples
--------
>>> from scipy.cluster.hierarchy import dendrogram, linkage
>>> from matplotlib import pyplot as plt
>>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]]
>>> Z = linkage(X, 'ward')
>>> fig = plt.figure(figsize=(25, 10))
>>> dn = dendrogram(Z)
>>> Z = linkage(X, 'single')
>>> fig = plt.figure(figsize=(25, 10))
>>> dn = dendrogram(Z)
>>> plt.show()
"""
if method not in _LINKAGE_METHODS:
raise ValueError("Invalid method: {0}".format(method))
y = _convert_to_double(np.asarray(y, order='c'))
if y.ndim == 1:
distance.is_valid_y(y, throw=True, name='y')
[y] = _copy_arrays_if_base_present([y])
elif y.ndim == 2:
if method in _EUCLIDEAN_METHODS and metric != 'euclidean':
raise ValueError("Method '{0}' requires the distance metric "
"to be Euclidean".format(method))
if y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0):
if np.all(y >= 0) and np.allclose(y, y.T):
_warning('The symmetric non-negative hollow observation '
'matrix looks suspiciously like an uncondensed '
'distance matrix')
y = distance.pdist(y, metric)
else:
raise ValueError("`y` must be 1 or 2 dimensional.")
if not np.all(np.isfinite(y)):
raise ValueError("The condensed distance matrix must contain only "
"finite values.")
n = int(distance.num_obs_y(y))
method_code = _LINKAGE_METHODS[method]
if method == 'single':
result = _hierarchy.mst_single_linkage(y, n)
elif method in ['complete', 'average', 'weighted', 'ward']:
result = _hierarchy.nn_chain(y, n, method_code)
else:
result = _hierarchy.fast_linkage(y, n, method_code)
if optimal_ordering:
return optimal_leaf_ordering(result, y)
else:
return result
class ClusterNode:
"""
A tree node class for representing a cluster.
Leaf nodes correspond to original observations, while non-leaf nodes
correspond to non-singleton clusters.
The `to_tree` function converts a matrix returned by the linkage
function into an easy-to-use tree representation.
All parameter names are also attributes.
Parameters
----------
id : int
The node id.
left : ClusterNode instance, optional
The left child tree node.
right : ClusterNode instance, optional
The right child tree node.
dist : float, optional
Distance for this cluster in the linkage matrix.
count : int, optional
The number of samples in this cluster.
See Also
--------
to_tree : for converting a linkage matrix ``Z`` into a tree object.
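    Examples
    --------
    An illustrative sketch (using a small, made-up 1-D dataset) of how
    `ClusterNode` objects are typically obtained and inspected:
    >>> from scipy.cluster.hierarchy import linkage, to_tree
    >>> Z = linkage([[0.], [1.], [5.]], method='single')
    >>> root = to_tree(Z)
    >>> root.is_leaf(), root.get_count()
    (False, 3)
    >>> sorted(root.pre_order())
    [0, 1, 2]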
"""
def __init__(self, id, left=None, right=None, dist=0, count=1):
if id < 0:
raise ValueError('The id must be non-negative.')
if dist < 0:
raise ValueError('The distance must be non-negative.')
if (left is None and right is not None) or \
(left is not None and right is None):
raise ValueError('Only full or proper binary trees are permitted.'
' This node has one child.')
if count < 1:
raise ValueError('A cluster must contain at least one original '
'observation.')
self.id = id
self.left = left
self.right = right
self.dist = dist
if self.left is None:
self.count = count
else:
self.count = left.count + right.count
def __lt__(self, node):
if not isinstance(node, ClusterNode):
raise ValueError("Can't compare ClusterNode "
"to type {}".format(type(node)))
return self.dist < node.dist
def __gt__(self, node):
if not isinstance(node, ClusterNode):
raise ValueError("Can't compare ClusterNode "
"to type {}".format(type(node)))
return self.dist > node.dist
def __eq__(self, node):
if not isinstance(node, ClusterNode):
raise ValueError("Can't compare ClusterNode "
"to type {}".format(type(node)))
return self.dist == node.dist
def get_id(self):
"""
The identifier of the target node.
For ``0 <= i < n``, `i` corresponds to original observation i.
For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
at iteration ``i-n``.
Returns
-------
id : int
The identifier of the target node.
"""
return self.id
def get_count(self):
"""
The number of leaf nodes (original observations) belonging to
the cluster node nd. If the target node is a leaf, 1 is
returned.
Returns
-------
get_count : int
The number of leaf nodes below the target node.
"""
return self.count
def get_left(self):
"""
Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.left
def get_right(self):
"""
Return a reference to the right child tree object.
Returns
-------
right : ClusterNode
            The right child of the target node. If the node is a leaf,
None is returned.
"""
return self.right
def is_leaf(self):
"""
Return True if the target node is a leaf.
Returns
-------
leafness : bool
True if the target node is a leaf node.
"""
return self.left is None
def pre_order(self, func=(lambda x: x.id)):
"""
Perform pre-order traversal without recursive function calls.
When a leaf node is first encountered, ``func`` is called with
the leaf node as its argument, and its result is appended to
the list.
For example, the statement::
ids = root.pre_order(lambda x: x.id)
returns a list of the node ids corresponding to the leaf nodes
of the tree as they appear from left to right.
Parameters
----------
func : function
Applied to each leaf ClusterNode object in the pre-order traversal.
Given the ``i``-th leaf node in the pre-order traversal ``n[i]``,
the result of ``func(n[i])`` is stored in ``L[i]``. If not
provided, the index of the original observation to which the node
corresponds is used.
Returns
-------
L : list
The pre-order traversal.
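        Examples
        --------
        An illustrative sketch (assuming a small, made-up 1-D dataset and a
        single-linkage tree built from it with `linkage` and `to_tree`):
        >>> from scipy.cluster.hierarchy import linkage, to_tree
        >>> tree = to_tree(linkage([[0.], [1.], [5.]], method='single'))
        >>> tree.pre_order(lambda x: x.is_leaf())
        [True, True, True]
        >>> sorted(tree.pre_order())
        [0, 1, 2]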
"""
# Do a preorder traversal, caching the result. To avoid having to do
# recursion, we'll store the previous index we've visited in a vector.
n = self.count
curNode = [None] * (2 * n)
lvisited = set()
rvisited = set()
curNode[0] = self
k = 0
preorder = []
while k >= 0:
nd = curNode[k]
ndid = nd.id
if nd.is_leaf():
preorder.append(func(nd))
k = k - 1
else:
if ndid not in lvisited:
curNode[k + 1] = nd.left
lvisited.add(ndid)
k = k + 1
elif ndid not in rvisited:
curNode[k + 1] = nd.right
rvisited.add(ndid)
k = k + 1
# If we've visited the left and right of this non-leaf
# node already, go up in the tree.
else:
k = k - 1
return preorder
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def _order_cluster_tree(Z):
"""
Return clustering nodes in bottom-up order by distance.
Parameters
----------
Z : scipy.cluster.linkage array
The linkage matrix.
Returns
-------
nodes : list
A list of ClusterNode objects.
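    Examples
    --------
    An illustrative sketch (small, made-up 1-D dataset); the non-leaf nodes
    come back sorted by increasing merge distance:
    >>> from scipy.cluster.hierarchy import linkage
    >>> Z = linkage([[0.], [1.], [5.]], method='single')
    >>> [float(node.dist) for node in _order_cluster_tree(Z)]
    [1.0, 4.0]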
"""
q = deque()
tree = to_tree(Z)
q.append(tree)
nodes = []
while q:
node = q.popleft()
if not node.is_leaf():
bisect.insort_left(nodes, node)
q.append(node.get_right())
q.append(node.get_left())
return nodes
def cut_tree(Z, n_clusters=None, height=None):
"""
Given a linkage matrix Z, return the cut tree.
Parameters
----------
Z : scipy.cluster.linkage array
The linkage matrix.
n_clusters : array_like, optional
Number of clusters in the tree at the cut point.
height : array_like, optional
The height at which to cut the tree. Only possible for ultrametric
trees.
Returns
-------
cutree : array
An array indicating group membership at each agglomeration step. I.e.,
for a full cut tree, in the first column each data point is in its own
cluster. At the next step, two nodes are merged. Finally, all
singleton and non-singleton clusters are in one group. If `n_clusters`
or `height` are given, the columns correspond to the columns of
`n_clusters` or `height`.
Examples
--------
>>> from scipy import cluster
>>> import numpy as np
>>> from numpy.random import default_rng
>>> rng = default_rng()
>>> X = rng.random((50, 4))
>>> Z = cluster.hierarchy.ward(X)
>>> cutree = cluster.hierarchy.cut_tree(Z, n_clusters=[5, 10])
>>> cutree[:10]
array([[0, 0],
[1, 1],
[2, 2],
[3, 3],
[3, 4],
[2, 2],
[0, 0],
[1, 5],
[3, 6],
[4, 7]]) # random
"""
nobs = num_obs_linkage(Z)
nodes = _order_cluster_tree(Z)
if height is not None and n_clusters is not None:
raise ValueError("At least one of either height or n_clusters "
"must be None")
elif height is None and n_clusters is None: # return the full cut tree
cols_idx = np.arange(nobs)
elif height is not None:
heights = np.array([x.dist for x in nodes])
cols_idx = np.searchsorted(heights, height)
else:
cols_idx = nobs - np.searchsorted(np.arange(nobs), n_clusters)
try:
n_cols = len(cols_idx)
except TypeError: # scalar
n_cols = 1
cols_idx = np.array([cols_idx])
groups = np.zeros((n_cols, nobs), dtype=int)
last_group = np.arange(nobs)
if 0 in cols_idx:
groups[0] = last_group
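    # Walk the merges bottom-up. At step ``i + 1`` the leaves under ``node``
    # are relabeled with the smallest of their current labels; the label freed
    # by the merge is removed by shifting every larger label down by one, so
    # the group labels stay contiguous.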
for i, node in enumerate(nodes):
idx = node.pre_order()
this_group = last_group.copy()
this_group[idx] = last_group[idx].min()
this_group[this_group > last_group[idx].max()] -= 1
if i + 1 in cols_idx:
groups[np.nonzero(i + 1 == cols_idx)[0]] = this_group
last_group = this_group
return groups.T
def to_tree(Z, rd=False):
"""
Convert a linkage matrix into an easy-to-use tree object.
The reference to the root `ClusterNode` object is returned (by default).
Each `ClusterNode` object has a ``left``, ``right``, ``dist``, ``id``,
and ``count`` attribute. The left and right attributes point to
ClusterNode objects that were combined to generate the cluster.
If both are None then the `ClusterNode` object is a leaf node, its count
must be 1, and its distance is meaningless but set to 0.
*Note: This function is provided for the convenience of the library
user. ClusterNodes are not used as input to any of the functions in this
library.*
Parameters
----------
Z : ndarray
The linkage matrix in proper form (see the `linkage`
function documentation).
rd : bool, optional
When False (default), a reference to the root `ClusterNode` object is
returned. Otherwise, a tuple ``(r, d)`` is returned. ``r`` is a
reference to the root node while ``d`` is a list of `ClusterNode`
objects - one per original entry in the linkage matrix plus entries
for all clustering steps. If a cluster id is
less than the number of samples ``n`` in the data that the linkage
matrix describes, then it corresponds to a singleton cluster (leaf
node).
See `linkage` for more information on the assignment of cluster ids
to clusters.
Returns
-------
tree : ClusterNode or tuple (ClusterNode, list of ClusterNode)
If ``rd`` is False, a `ClusterNode`.
        If ``rd`` is True, a tuple ``(r, d)`` where ``d`` is a list of length
        ``2*n - 1``, with ``n`` the number of samples. See the description
        of `rd` above for more details.
See Also
--------
linkage, is_valid_linkage, ClusterNode
Examples
--------
>>> from scipy.cluster import hierarchy
>>> rng = np.random.default_rng()
>>> x = rng.random((5, 2))
>>> Z = hierarchy.linkage(x)
>>> hierarchy.to_tree(Z)
<scipy.cluster.hierarchy.ClusterNode object at ...
>>> rootnode, nodelist = hierarchy.to_tree(Z, rd=True)
>>> rootnode
<scipy.cluster.hierarchy.ClusterNode object at ...
>>> len(nodelist)
9
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# Number of original objects is equal to the number of rows plus 1.
n = Z.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n * 2 - 1)
# Create the nodes corresponding to the n original objects.
for i in range(0, n):
d[i] = ClusterNode(i)
nd = None
for i, row in enumerate(Z):
fi = int(row[0])
fj = int(row[1])
if fi > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 0') % fi)
if fj > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 1') % fj)
nd = ClusterNode(i + n, d[fi], d[fj], row[2])
# ^ id ^ left ^ right ^ dist
if row[3] != nd.count:
raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
'incorrect.') % i)
d[n + i] = nd
if rd:
return (nd, d)
else:
return nd
def optimal_leaf_ordering(Z, y, metric='euclidean'):
"""
    Given a linkage matrix Z and a distance matrix, reorder the linkage so
    that the distance between successive leaves is minimal.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix. See
`linkage` for more information on the return structure and
algorithm.
y : ndarray
The condensed distance matrix from which Z was generated.
Alternatively, a collection of m observation vectors in n
dimensions may be passed as an m by n array.
metric : str or function, optional
The distance metric to use in the case that y is a collection of
observation vectors; ignored otherwise. See the ``pdist``
function for a list of valid distance metrics. A custom distance
function can also be used.
Returns
-------
Z_ordered : ndarray
A copy of the linkage matrix Z, reordered to minimize the distance
between adjacent leaves.
Examples
--------
>>> from scipy.cluster import hierarchy
>>> rng = np.random.default_rng()
>>> X = rng.standard_normal((10, 10))
>>> Z = hierarchy.ward(X)
>>> hierarchy.leaves_list(Z)
array([0, 3, 1, 9, 2, 5, 7, 4, 6, 8], dtype=int32)
>>> hierarchy.leaves_list(hierarchy.optimal_leaf_ordering(Z, X))
array([3, 0, 2, 5, 7, 4, 8, 6, 9, 1], dtype=int32)
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
y = _convert_to_double(np.asarray(y, order='c'))
if y.ndim == 1:
distance.is_valid_y(y, throw=True, name='y')
[y] = _copy_arrays_if_base_present([y])
elif y.ndim == 2:
if y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0):
if np.all(y >= 0) and np.allclose(y, y.T):
_warning('The symmetric non-negative hollow observation '
'matrix looks suspiciously like an uncondensed '
'distance matrix')
y = distance.pdist(y, metric)
else:
raise ValueError("`y` must be 1 or 2 dimensional.")
if not np.all(np.isfinite(y)):
raise ValueError("The condensed distance matrix must contain only "
"finite values.")
return _optimal_leaf_ordering.optimal_leaf_ordering(Z, y)
def _convert_to_bool(X):
if X.dtype != bool:
X = X.astype(bool)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def cophenet(Z, Y=None):
"""
Calculate the cophenetic distances between each observation in
the hierarchical clustering defined by the linkage ``Z``.
Suppose ``p`` and ``q`` are original observations in
disjoint clusters ``s`` and ``t``, respectively and
``s`` and ``t`` are joined by a direct parent cluster
``u``. The cophenetic distance between observations
``i`` and ``j`` is simply the distance between
clusters ``s`` and ``t``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as an array
(see `linkage` function).
Y : ndarray (optional)
Calculates the cophenetic correlation coefficient ``c`` of a
hierarchical clustering defined by the linkage matrix `Z`
of a set of :math:`n` observations in :math:`m`
dimensions. `Y` is the condensed distance matrix from which
`Z` was generated.
Returns
-------
c : ndarray
        The cophenetic correlation coefficient (if ``Y`` is passed).
d : ndarray
The cophenetic distance matrix in condensed form. The
:math:`ij` th entry is the cophenetic distance between
original observations :math:`i` and :math:`j`.
See Also
--------
linkage : for a description of what a linkage matrix is.
scipy.spatial.distance.squareform : transforming condensed matrices into square ones.
Examples
--------
>>> from scipy.cluster.hierarchy import single, cophenet
>>> from scipy.spatial.distance import pdist, squareform
    Given a dataset ``X`` and a linkage matrix ``Z``, the cophenetic distance
    between two points of ``X`` is the distance between the largest two
    distinct clusters that separately contain each of the points:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
``X`` corresponds to this dataset ::
x x x x
x x
x x
x x x x
>>> Z = single(pdist(X))
>>> Z
array([[ 0., 1., 1., 2.],
[ 2., 12., 1., 3.],
[ 3., 4., 1., 2.],
[ 5., 14., 1., 3.],
[ 6., 7., 1., 2.],
[ 8., 16., 1., 3.],
[ 9., 10., 1., 2.],
[11., 18., 1., 3.],
[13., 15., 2., 6.],
[17., 20., 2., 9.],
[19., 21., 2., 12.]])
>>> cophenet(Z)
array([1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 2., 2., 2., 2., 2.,
2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 2., 2.,
2., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
1., 1., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 1., 1., 1.])
The output of the `scipy.cluster.hierarchy.cophenet` method is
represented in condensed form. We can use
`scipy.spatial.distance.squareform` to see the output as a
regular matrix (where each element ``ij`` denotes the cophenetic distance
between each ``i``, ``j`` pair of points in ``X``):
>>> squareform(cophenet(Z))
array([[0., 1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.],
[1., 0., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.],
[1., 1., 0., 2., 2., 2., 2., 2., 2., 2., 2., 2.],
[2., 2., 2., 0., 1., 1., 2., 2., 2., 2., 2., 2.],
[2., 2., 2., 1., 0., 1., 2., 2., 2., 2., 2., 2.],
[2., 2., 2., 1., 1., 0., 2., 2., 2., 2., 2., 2.],
[2., 2., 2., 2., 2., 2., 0., 1., 1., 2., 2., 2.],
[2., 2., 2., 2., 2., 2., 1., 0., 1., 2., 2., 2.],
[2., 2., 2., 2., 2., 2., 1., 1., 0., 2., 2., 2.],
[2., 2., 2., 2., 2., 2., 2., 2., 2., 0., 1., 1.],
[2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 0., 1.],
[2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 0.]])
    In this example, the cophenetic distance between points of ``X`` that are
    very close (i.e., in the same corner) is 1. For other pairs of points, it
    is 2, because those points lie in clusters at different corners, so the
    distance between these clusters is larger.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
zz = np.zeros((n * (n-1)) // 2, dtype=np.double)
    # The C code does not support striding using strides,
    # so the dimensions are used instead.
Z = _convert_to_double(Z)
_hierarchy.cophenetic_distances(Z, zz, int(n))
if Y is None:
return zz
Y = np.asarray(Y, order='c')
distance.is_valid_y(Y, throw=True, name='Y')
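    # When ``Y`` is given, also compute the cophenetic correlation
    # coefficient: the Pearson correlation between the original condensed
    # distances ``Y`` and the cophenetic distances ``zz``.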
z = zz.mean()
y = Y.mean()
Yy = Y - y
Zz = zz - z
numerator = (Yy * Zz)
denomA = Yy**2
denomB = Zz**2
c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
return (c, zz)
def inconsistent(Z, d=2):
r"""
Calculate inconsistency statistics on a linkage matrix.
Parameters
----------
Z : ndarray
The :math:`(n-1)` by 4 matrix encoding the linkage (hierarchical
clustering). See `linkage` documentation for more information on its
form.
d : int, optional
The number of links up to `d` levels below each non-singleton cluster.
Returns
-------
R : ndarray
A :math:`(n-1)` by 4 matrix where the ``i``'th row contains the link
statistics for the non-singleton cluster ``i``. The link statistics are
computed over the link heights for links :math:`d` levels below the
cluster ``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
deviation of the link heights, respectively; ``R[i,2]`` is the number
of links included in the calculation; and ``R[i,3]`` is the
inconsistency coefficient,
        .. math:: \frac{\mathtt{Z[i,2]} - \mathtt{R[i,0]}}{\mathtt{R[i,1]}}
Notes
-----
This function behaves similarly to the MATLAB(TM) ``inconsistent``
function.
Examples
--------
>>> from scipy.cluster.hierarchy import inconsistent, linkage
>>> from matplotlib import pyplot as plt
>>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]]
>>> Z = linkage(X, 'ward')
>>> print(Z)
[[ 5. 6. 0. 2. ]
[ 2. 7. 0. 2. ]
[ 0. 4. 1. 2. ]
[ 1. 8. 1.15470054 3. ]
[ 9. 10. 2.12132034 4. ]
[ 3. 12. 4.11096096 5. ]
[11. 13. 14.07183949 8. ]]
>>> inconsistent(Z)
array([[ 0. , 0. , 1. , 0. ],
[ 0. , 0. , 1. , 0. ],
[ 1. , 0. , 1. , 0. ],
[ 0.57735027, 0.81649658, 2. , 0.70710678],
[ 1.04044011, 1.06123822, 3. , 1.01850858],
[ 3.11614065, 1.40688837, 2. , 0.70710678],
[ 6.44583366, 6.76770586, 3. , 1.12682288]])
"""
Z = np.asarray(Z, order='c')
Zs = Z.shape
is_valid_linkage(Z, throw=True, name='Z')
if (not d == np.floor(d)) or d < 0:
raise ValueError('The second argument d must be a nonnegative '
'integer value.')
    # The C code does not support striding using strides,
    # so the dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
n = Zs[0] + 1
R = np.zeros((n - 1, 4), dtype=np.double)
_hierarchy.inconsistent(Z, R, int(n), int(d))
return R
def from_mlab_linkage(Z):
"""
Convert a linkage matrix generated by MATLAB(TM) to a new
linkage matrix compatible with this module.
The conversion does two things:
* the indices are converted from ``1..N`` to ``0..(N-1)`` form,
and
* a fourth column ``Z[:,3]`` is added where ``Z[i,3]`` represents the
number of original observations (leaves) in the non-singleton
cluster ``i``.
This function is useful when loading in linkages from legacy data
files generated by MATLAB.
Parameters
----------
Z : ndarray
A linkage matrix generated by MATLAB(TM).
Returns
-------
ZS : ndarray
A linkage matrix compatible with ``scipy.cluster.hierarchy``.
See Also
--------
linkage : for a description of what a linkage matrix is.
to_mlab_linkage : transform from SciPy to MATLAB format.
Examples
--------
>>> import numpy as np
>>> from scipy.cluster.hierarchy import ward, from_mlab_linkage
Given a linkage matrix in MATLAB format ``mZ``, we can use
`scipy.cluster.hierarchy.from_mlab_linkage` to import
it into SciPy format:
>>> mZ = np.array([[1, 2, 1], [4, 5, 1], [7, 8, 1],
... [10, 11, 1], [3, 13, 1.29099445],
... [6, 14, 1.29099445],
... [9, 15, 1.29099445],
... [12, 16, 1.29099445],
... [17, 18, 5.77350269],
... [19, 20, 5.77350269],
... [21, 22, 8.16496581]])
>>> Z = from_mlab_linkage(mZ)
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.29099445, 3. ],
[ 5. , 13. , 1.29099445, 3. ],
[ 8. , 14. , 1.29099445, 3. ],
[ 11. , 15. , 1.29099445, 3. ],
[ 16. , 17. , 5.77350269, 6. ],
[ 18. , 19. , 5.77350269, 6. ],
[ 20. , 21. , 8.16496581, 12. ]])
As expected, the linkage matrix ``Z`` returned includes an
additional column counting the number of original samples in
each cluster. Also, all cluster indices are reduced by 1
(MATLAB format uses 1-indexing, whereas SciPy uses 0-indexing).
"""
Z = np.asarray(Z, dtype=np.double, order='c')
Zs = Z.shape
# If it's empty, return it.
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
if len(Zs) != 2:
raise ValueError("The linkage array must be rectangular.")
# If it contains no rows, return it.
if Zs[0] == 0:
return Z.copy()
Zpart = Z.copy()
if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
raise ValueError('The format of the indices is not 1..N')
Zpart[:, 0:2] -= 1.0
CS = np.zeros((Zs[0],), dtype=np.double)
_hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)
return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
def to_mlab_linkage(Z):
"""
Convert a linkage matrix to a MATLAB(TM) compatible one.
Converts a linkage matrix ``Z`` generated by the linkage function
of this module to a MATLAB(TM) compatible one. The return linkage
matrix has the last column removed and the cluster indices are
converted to ``1..N`` indexing.
Parameters
----------
Z : ndarray
A linkage matrix generated by ``scipy.cluster.hierarchy``.
Returns
-------
to_mlab_linkage : ndarray
A linkage matrix compatible with MATLAB(TM)'s hierarchical
clustering functions.
The return linkage matrix has the last column removed
and the cluster indices are converted to ``1..N`` indexing.
See Also
--------
linkage : for a description of what a linkage matrix is.
from_mlab_linkage : transform from Matlab to SciPy format.
Examples
--------
>>> from scipy.cluster.hierarchy import ward, to_mlab_linkage
>>> from scipy.spatial.distance import pdist
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = ward(pdist(X))
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.29099445, 3. ],
[ 5. , 13. , 1.29099445, 3. ],
[ 8. , 14. , 1.29099445, 3. ],
[11. , 15. , 1.29099445, 3. ],
[16. , 17. , 5.77350269, 6. ],
[18. , 19. , 5.77350269, 6. ],
[20. , 21. , 8.16496581, 12. ]])
After a linkage matrix ``Z`` has been created, we can use
`scipy.cluster.hierarchy.to_mlab_linkage` to convert it
into MATLAB format:
>>> mZ = to_mlab_linkage(Z)
>>> mZ
array([[ 1. , 2. , 1. ],
[ 4. , 5. , 1. ],
[ 7. , 8. , 1. ],
[ 10. , 11. , 1. ],
[ 3. , 13. , 1.29099445],
[ 6. , 14. , 1.29099445],
[ 9. , 15. , 1.29099445],
[ 12. , 16. , 1.29099445],
[ 17. , 18. , 5.77350269],
[ 19. , 20. , 5.77350269],
[ 21. , 22. , 8.16496581]])
The new linkage matrix ``mZ`` uses 1-indexing for all the
clusters (instead of 0-indexing). Also, the last column of
the original linkage matrix has been dropped.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
Zs = Z.shape
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
is_valid_linkage(Z, throw=True, name='Z')
ZP = Z[:, 0:3].copy()
ZP[:, 0:2] += 1.0
return ZP
def is_monotonic(Z):
"""
Return True if the linkage passed is monotonic.
The linkage is monotonic if for every cluster :math:`s` and :math:`t`
joined, the distance between them is no less than the distance
between any previously joined clusters.
Parameters
----------
Z : ndarray
The linkage matrix to check for monotonicity.
Returns
-------
b : bool
A boolean indicating whether the linkage is monotonic.
See Also
--------
linkage : for a description of what a linkage matrix is.
Examples
--------
>>> from scipy.cluster.hierarchy import median, ward, is_monotonic
>>> from scipy.spatial.distance import pdist
By definition, some hierarchical clustering algorithms - such as
`scipy.cluster.hierarchy.ward` - produce monotonic assignments of
samples to clusters; however, this is not always true for other
hierarchical methods - e.g. `scipy.cluster.hierarchy.median`.
Given a linkage matrix ``Z`` (as the result of a hierarchical clustering
method) we can test programmatically whether it has the monotonicity
property or not, using `scipy.cluster.hierarchy.is_monotonic`:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = ward(pdist(X))
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.29099445, 3. ],
[ 5. , 13. , 1.29099445, 3. ],
[ 8. , 14. , 1.29099445, 3. ],
[11. , 15. , 1.29099445, 3. ],
[16. , 17. , 5.77350269, 6. ],
[18. , 19. , 5.77350269, 6. ],
[20. , 21. , 8.16496581, 12. ]])
>>> is_monotonic(Z)
True
>>> Z = median(pdist(X))
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 2. , 12. , 1.11803399, 3. ],
[ 5. , 13. , 1.11803399, 3. ],
[ 8. , 15. , 1.11803399, 3. ],
[11. , 14. , 1.11803399, 3. ],
[18. , 19. , 3. , 6. ],
[16. , 17. , 3.5 , 6. ],
[20. , 21. , 3.25 , 12. ]])
>>> is_monotonic(Z)
False
Note that this method is equivalent to just verifying that the distances
in the third column of the linkage matrix appear in a monotonically
increasing order.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
    # Check that the merge distances are non-decreasing, i.e., each value is
    # no less than its predecessor.
return (Z[1:, 2] >= Z[:-1, 2]).all()
def is_valid_im(R, warning=False, throw=False, name=None):
"""Return True if the inconsistency matrix passed is valid.
It must be a :math:`n` by 4 array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
See Also
--------
linkage : for a description of what a linkage matrix is.
    inconsistent : for the creation of an inconsistency matrix.
Examples
--------
>>> from scipy.cluster.hierarchy import ward, inconsistent, is_valid_im
>>> from scipy.spatial.distance import pdist
Given a data set ``X``, we can apply a clustering method to obtain a
linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
be also used to obtain the inconsistency matrix ``R`` associated to
this clustering process:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = ward(pdist(X))
>>> R = inconsistent(Z)
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.29099445, 3. ],
[ 5. , 13. , 1.29099445, 3. ],
[ 8. , 14. , 1.29099445, 3. ],
[11. , 15. , 1.29099445, 3. ],
[16. , 17. , 5.77350269, 6. ],
[18. , 19. , 5.77350269, 6. ],
[20. , 21. , 8.16496581, 12. ]])
>>> R
array([[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1.14549722, 0.20576415, 2. , 0.70710678],
[1.14549722, 0.20576415, 2. , 0.70710678],
[1.14549722, 0.20576415, 2. , 0.70710678],
[1.14549722, 0.20576415, 2. , 0.70710678],
[2.78516386, 2.58797734, 3. , 1.15470054],
[2.78516386, 2.58797734, 3. , 1.15470054],
[6.57065706, 1.38071187, 3. , 1.15470054]])
Now we can use `scipy.cluster.hierarchy.is_valid_im` to verify that
``R`` is correct:
>>> is_valid_im(R)
True
However, if ``R`` is wrongly constructed (e.g., one of the standard
deviations is set to a negative value), then the check will fail:
>>> R[-1,1] = R[-1,1] * -1
>>> is_valid_im(R)
False
"""
R = np.asarray(R, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(R) != np.ndarray:
raise TypeError('Variable %spassed as inconsistency matrix is not '
'a numpy array.' % name_str)
if R.dtype != np.double:
raise TypeError('Inconsistency matrix %smust contain doubles '
'(double).' % name_str)
if len(R.shape) != 2:
raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. '
'be two-dimensional).' % name_str)
if R.shape[1] != 4:
raise ValueError('Inconsistency matrix %smust have 4 columns.' %
name_str)
if R.shape[0] < 1:
raise ValueError('Inconsistency matrix %smust have at least one '
'row.' % name_str)
if (R[:, 0] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height means.' % name_str)
if (R[:, 1] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height standard deviations.' % name_str)
if (R[:, 2] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'counts.' % name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def is_valid_linkage(Z, warning=False, throw=False, name=None):
"""
Check the validity of a linkage matrix.
A linkage matrix is valid if it is a 2-D array (type double)
with :math:`n` rows and 4 columns. The first two columns must contain
indices between 0 and :math:`2n-1`. For a given row ``i``, the following
two expressions have to hold:
.. math::
0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1
        0 \\leq \\mathtt{Z[i,1]} \\leq i+n-1
I.e., a cluster cannot join another cluster unless the cluster being joined
has been generated.
Parameters
----------
Z : array_like
Linkage matrix.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
        True if the linkage matrix is valid.
See Also
--------
linkage: for a description of what a linkage matrix is.
Examples
--------
>>> from scipy.cluster.hierarchy import ward, is_valid_linkage
>>> from scipy.spatial.distance import pdist
All linkage matrices generated by the clustering methods in this module
will be valid (i.e., they will have the appropriate dimensions and the two
required expressions will hold for all the rows).
We can check this using `scipy.cluster.hierarchy.is_valid_linkage`:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = ward(pdist(X))
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.29099445, 3. ],
[ 5. , 13. , 1.29099445, 3. ],
[ 8. , 14. , 1.29099445, 3. ],
[11. , 15. , 1.29099445, 3. ],
[16. , 17. , 5.77350269, 6. ],
[18. , 19. , 5.77350269, 6. ],
[20. , 21. , 8.16496581, 12. ]])
>>> is_valid_linkage(Z)
True
However, if we create a linkage matrix in a wrong way - or if we modify
a valid one in a way that any of the required expressions don't hold
anymore, then the check will fail:
>>> Z[3][1] = 20 # the cluster number 20 is not defined at this point
>>> is_valid_linkage(Z)
False
"""
Z = np.asarray(Z, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(Z) != np.ndarray:
raise TypeError('Passed linkage argument %sis not a valid array.' %
name_str)
if Z.dtype != np.double:
raise TypeError('Linkage matrix %smust contain doubles.' % name_str)
if len(Z.shape) != 2:
raise ValueError('Linkage matrix %smust have shape=2 (i.e. be '
'two-dimensional).' % name_str)
if Z.shape[1] != 4:
raise ValueError('Linkage matrix %smust have 4 columns.' % name_str)
if Z.shape[0] == 0:
raise ValueError('Linkage must be computed on at least two '
'observations.')
n = Z.shape[0]
if n > 1:
if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):
raise ValueError('Linkage %scontains negative indices.' %
name_str)
if (Z[:, 2] < 0).any():
raise ValueError('Linkage %scontains negative distances.' %
name_str)
if (Z[:, 3] < 0).any():
raise ValueError('Linkage %scontains negative counts.' %
name_str)
if _check_hierarchy_uses_cluster_before_formed(Z):
raise ValueError('Linkage %suses non-singleton cluster before '
'it is formed.' % name_str)
if _check_hierarchy_uses_cluster_more_than_once(Z):
raise ValueError('Linkage %suses the same cluster more than once.'
% name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
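    # Return True if any merge row i of Z references a cluster id that has
    # not yet been formed at that step (i.e., an id >= n + i).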
n = Z.shape[0] + 1
for i in range(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
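    # Return True if a cluster id appears as a child in more than one merge,
    # or if a merge joins a cluster with itself.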
n = Z.shape[0] + 1
chosen = set([])
for i in range(0, n - 1):
if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(Z[i, 0])
chosen.add(Z[i, 1])
return False
def _check_hierarchy_not_all_clusters_used(Z):
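    # Return True if some cluster id in 0..2n-3 never appears as a child of
    # any merge, i.e., not every cluster is used in the hierarchy.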
n = Z.shape[0] + 1
chosen = set([])
for i in range(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
"""
Return the number of original observations of the linkage matrix passed.
Parameters
----------
Z : ndarray
The linkage matrix on which to perform the operation.
Returns
-------
n : int
The number of original observations in the linkage.
Examples
--------
>>> from scipy.cluster.hierarchy import ward, num_obs_linkage
>>> from scipy.spatial.distance import pdist
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = ward(pdist(X))
``Z`` is a linkage matrix obtained after using the Ward clustering method
with ``X``, a dataset with 12 data points.
>>> num_obs_linkage(Z)
12
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
return (Z.shape[0] + 1)
def correspond(Z, Y):
"""
Check for correspondence between linkage and condensed distance matrices.
They must have the same number of original observations for
the check to succeed.
This function is useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
Parameters
----------
Z : array_like
The linkage matrix to check for correspondence.
Y : array_like
The condensed distance matrix to check for correspondence.
Returns
-------
b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
See Also
--------
linkage : for a description of what a linkage matrix is.
Examples
--------
>>> from scipy.cluster.hierarchy import ward, correspond
>>> from scipy.spatial.distance import pdist
This method can be used to check if a given linkage matrix ``Z`` has been
obtained from the application of a cluster method over a dataset ``X``:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> X_condensed = pdist(X)
>>> Z = ward(X_condensed)
Here, we can compare ``Z`` and ``X`` (in condensed form):
>>> correspond(Z, X_condensed)
True
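    If the condensed distance matrix is computed from a different number of
    observations, the check fails; for example, dropping the last point here:
    >>> correspond(Z, pdist(X[:-1]))
    False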
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
Z = np.asarray(Z, order='c')
Y = np.asarray(Y, order='c')
return distance.num_obs_y(Y) == num_obs_linkage(Z)
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
"""
Form flat clusters from the hierarchical clustering defined by
the given linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded with the matrix returned
by the `linkage` function.
t : scalar
For criteria 'inconsistent', 'distance' or 'monocrit',
this is the threshold to apply when forming flat clusters.
For 'maxclust' or 'maxclust_monocrit' criteria,
this would be max number of clusters requested.
criterion : str, optional
The criterion to use in forming flat clusters. This can
be any of the following values:
``inconsistent`` :
If a cluster node and all its
descendants have an inconsistent value less than or equal
to `t`, then all its leaf descendants belong to the
same flat cluster. When no non-singleton cluster meets
this criterion, every node is assigned to its own
cluster. (Default)
``distance`` :
Forms flat clusters so that the original
              observations in each flat cluster have a cophenetic
              distance no greater than `t`.
``maxclust`` :
Finds a minimum threshold ``r`` so that
the cophenetic distance between any two original
observations in the same flat cluster is no more than
``r`` and no more than `t` flat clusters are formed.
``monocrit`` :
Forms a flat cluster from a cluster node c
with index i when ``monocrit[j] <= t``.
For example, to threshold on the maximum mean distance
as computed in the inconsistency matrix R with a
threshold of 0.8 do::
MR = maxRstat(Z, R, 3)
fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
``maxclust_monocrit`` :
Forms a flat cluster from a
non-singleton cluster node ``c`` when ``monocrit[i] <=
r`` for all cluster indices ``i`` below and including
``c``. ``r`` is minimized such that no more than ``t``
flat clusters are formed. monocrit must be
monotonic. For example, to minimize the threshold t on
maximum inconsistency values so that no more than 3 flat
clusters are formed, do::
MI = maxinconsts(Z, R)
fcluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
depth : int, optional
The maximum depth to perform the inconsistency calculation.
It has no meaning for the other criteria. Default is 2.
R : ndarray, optional
The inconsistency matrix to use for the 'inconsistent'
criterion. This matrix is computed if not provided.
monocrit : ndarray, optional
An array of length n-1. `monocrit[i]` is the
        statistic upon which non-singleton i is thresholded. The
monocrit vector must be monotonic, i.e., given a node c with
index i, for all node indices j corresponding to nodes
below c, ``monocrit[i] >= monocrit[j]``.
Returns
-------
fcluster : ndarray
An array of length ``n``. ``T[i]`` is the flat cluster number to
which original observation ``i`` belongs.
See Also
--------
    linkage : for information about how hierarchical clustering methods work.
Examples
--------
>>> from scipy.cluster.hierarchy import ward, fcluster
>>> from scipy.spatial.distance import pdist
All cluster linkage methods - e.g., `scipy.cluster.hierarchy.ward`
generate a linkage matrix ``Z`` as their output:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = ward(pdist(X))
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.29099445, 3. ],
[ 5. , 13. , 1.29099445, 3. ],
[ 8. , 14. , 1.29099445, 3. ],
[11. , 15. , 1.29099445, 3. ],
[16. , 17. , 5.77350269, 6. ],
[18. , 19. , 5.77350269, 6. ],
[20. , 21. , 8.16496581, 12. ]])
This matrix represents a dendrogram, where the first and second elements
are the two clusters merged at each step, the third element is the
distance between these clusters, and the fourth element is the size of
the new cluster - the number of original data points included.
`scipy.cluster.hierarchy.fcluster` can be used to flatten the
dendrogram, obtaining as a result an assignation of the original data
points to single clusters.
This assignation mostly depends on a distance threshold ``t`` - the maximum
inter-cluster distance allowed:
>>> fcluster(Z, t=0.9, criterion='distance')
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32)
>>> fcluster(Z, t=1.1, criterion='distance')
array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32)
>>> fcluster(Z, t=3, criterion='distance')
array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
>>> fcluster(Z, t=9, criterion='distance')
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
In the first case, the threshold ``t`` is too small to allow any two
samples in the data to form a cluster, so 12 different clusters are
returned.
    In the second case, the threshold is large enough to allow the four
    closest pairs of points to merge, while the remaining four points stay
    on their own. So, here, only 8 clusters are returned.
    The third case, with a still higher threshold, allows each group of
    three neighboring points to be joined into one cluster - so 4 clusters
    are returned here.
Lastly, the threshold of the fourth case is large enough to allow for
all data points to be merged together - so a single cluster is returned.
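    Finally, a fixed number of clusters can be requested instead of a distance
    threshold by using the 'maxclust' criterion. With ``t=4`` this yields the
    same grouping as the ``t=3`` example above, although the particular label
    values assigned to the clusters are not guaranteed to match:
    >>> T = fcluster(Z, t=4, criterion='maxclust')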
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
if criterion == 'inconsistent':
if R is None:
R = inconsistent(Z, depth)
else:
R = np.asarray(R, order='c')
is_valid_im(R, throw=True, name='R')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[R] = _copy_arrays_if_base_present([R])
_hierarchy.cluster_in(Z, R, T, float(t), int(n))
elif criterion == 'distance':
_hierarchy.cluster_dist(Z, T, float(t), int(n))
elif criterion == 'maxclust':
_hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))
elif criterion == 'monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
elif criterion == 'maxclust_monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
else:
raise ValueError('Invalid cluster formation criterion: %s'
% str(criterion))
return T
def fclusterdata(X, t, criterion='inconsistent',
metric='euclidean', depth=2, method='single', R=None):
"""
Cluster observation data using a given metric.
Clusters the original observations in the n-by-m data
matrix X (n observations in m dimensions), using the euclidean
distance metric to calculate distances between original observations,
performs hierarchical clustering using the single linkage algorithm,
and forms flat clusters using the inconsistency method with `t` as the
cut-off threshold.
A 1-D array ``T`` of length ``n`` is returned. ``T[i]`` is
the index of the flat cluster to which the original observation ``i``
belongs.
Parameters
----------
X : (N, M) ndarray
N by M data matrix with N observations in M dimensions.
t : scalar
For criteria 'inconsistent', 'distance' or 'monocrit',
this is the threshold to apply when forming flat clusters.
For 'maxclust' or 'maxclust_monocrit' criteria,
this would be max number of clusters requested.
criterion : str, optional
Specifies the criterion for forming flat clusters. Valid
values are 'inconsistent' (default), 'distance', or 'maxclust'
cluster formation algorithms. See `fcluster` for descriptions.
metric : str or function, optional
The distance metric for calculating pairwise distances. See
``distance.pdist`` for descriptions and linkage to verify
compatibility with the linkage method.
depth : int, optional
The maximum depth for the inconsistency calculation. See
`inconsistent` for more information.
method : str, optional
        The linkage method to use (single, complete, average,
        weighted, median, centroid, ward). See `linkage` for more
information. Default is "single".
R : ndarray, optional
The inconsistency matrix. It will be computed if necessary
if it is not passed.
Returns
-------
fclusterdata : ndarray
A vector of length n. T[i] is the flat cluster number to
which original observation i belongs.
See Also
--------
scipy.spatial.distance.pdist : pairwise distance metrics
Notes
-----
This function is similar to the MATLAB function ``clusterdata``.
Examples
--------
>>> from scipy.cluster.hierarchy import fclusterdata
    This is a convenience method that abstracts all the steps performed in a
    typical SciPy hierarchical clustering workflow.
* Transform the input data into a condensed matrix with `scipy.spatial.distance.pdist`.
* Apply a clustering method.
* Obtain flat clusters at a user defined distance threshold ``t`` using `scipy.cluster.hierarchy.fcluster`.
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> fclusterdata(X, t=1)
array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)
The output here (for the dataset ``X``, distance threshold ``t``, and the
default settings) is four clusters with three data points each.
"""
X = np.asarray(X, order='c', dtype=np.double)
if type(X) != np.ndarray or len(X.shape) != 2:
raise TypeError('The observation matrix X must be an n by m numpy '
'array.')
Y = distance.pdist(X, metric=metric)
Z = linkage(Y, method=method)
if R is None:
R = inconsistent(Z, d=depth)
else:
R = np.asarray(R, order='c')
T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
return T
def leaves_list(Z):
"""
Return a list of leaf node ids.
The return corresponds to the observation vector index as it appears
in the tree from left to right. Z is a linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. `Z` is
a linkage matrix. See `linkage` for more information.
Returns
-------
leaves_list : ndarray
The list of leaf node ids.
See Also
--------
dendrogram : for information about dendrogram structure.
Examples
--------
>>> from scipy.cluster.hierarchy import ward, dendrogram, leaves_list
>>> from scipy.spatial.distance import pdist
>>> from matplotlib import pyplot as plt
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = ward(pdist(X))
The linkage matrix ``Z`` represents a dendrogram, that is, a tree that
encodes the structure of the clustering performed.
`scipy.cluster.hierarchy.leaves_list` shows the mapping between
indices in the ``X`` dataset and leaves in the dendrogram:
>>> leaves_list(Z)
array([ 2, 0, 1, 5, 3, 4, 8, 6, 7, 11, 9, 10], dtype=int32)
>>> fig = plt.figure(figsize=(25, 10))
>>> dn = dendrogram(Z)
>>> plt.show()
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
ML = np.zeros((n,), dtype='i')
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.prelist(Z, ML, int(n))
return ML
# Maps number of leaves to text size.
#
# p <= 20, size="12"
# 20 < p <= 30, size="10"
# 30 < p <= 50, size="8"
# 50 < p <= 85, size="6"
# 85 < p, size="5"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
_drotation = {20: 0, 40: 45, np.inf: 90}
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Remove duplicates AND preserve the original order of the elements.
The set class is not guaranteed to do this.
"""
seen_before = set([])
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
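    # Return the leaf label font size for p leaves: the value stored under
    # the smallest threshold key in _dtextsizes that is >= p.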
for k in _dtextsortedkeys:
if p <= k:
return _dtextsizes[k]
def _get_tick_rotation(p):
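    # Return the leaf label rotation (in degrees) for p leaves, analogous to
    # _get_tick_text_size.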
for k in _drotationsortedkeys:
if p <= k:
return _drotation[k]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=None,
leaf_rotation=None, contraction_marks=None,
ax=None, above_threshold_color='C0'):
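    # Render precomputed dendrogram link coordinates (icoords/dcoords) onto a
    # matplotlib Axes, handling orientation, axis limits, tick labels, link
    # colors, and optional contraction marks.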
# Import matplotlib here so that it's not imported unless dendrograms
# are plotted. Raise an informative error if importing fails.
try:
# if an axis is provided, don't use pylab at all
if ax is None:
import matplotlib.pylab
import matplotlib.patches
import matplotlib.collections
except ImportError as e:
raise ImportError("You must install the matplotlib library to plot "
"the dendrogram. Use no_plot=True to calculate the "
"dendrogram without plotting.") from e
if ax is None:
ax = matplotlib.pylab.gca()
# if we're using pylab, we want to trigger a draw at the end
trigger_redraw = True
else:
trigger_redraw = False
# Independent variable plot width
ivw = len(ivl) * 10
# Dependent variable plot height
dvw = mh + mh * 0.05
iv_ticks = np.arange(5, len(ivl) * 10 + 5, 10)
if orientation in ('top', 'bottom'):
if orientation == 'top':
ax.set_ylim([0, dvw])
ax.set_xlim([0, ivw])
else:
ax.set_ylim([dvw, 0])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(iv_ticks)
if orientation == 'top':
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks_position('top')
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
leaf_rot = (float(_get_tick_rotation(len(ivl)))
if (leaf_rotation is None) else leaf_rotation)
leaf_font = (float(_get_tick_text_size(len(ivl)))
if (leaf_font_size is None) else leaf_font_size)
ax.set_xticklabels(ivl, rotation=leaf_rot, size=leaf_font)
elif orientation in ('left', 'right'):
if orientation == 'left':
ax.set_xlim([dvw, 0])
ax.set_ylim([0, ivw])
else:
ax.set_xlim([0, dvw])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(iv_ticks)
if orientation == 'left':
ax.yaxis.set_ticks_position('right')
else:
ax.yaxis.set_ticks_position('left')
# Make the tick marks invisible because they cover up the links
for line in ax.get_yticklines():
line.set_visible(False)
leaf_font = (float(_get_tick_text_size(len(ivl)))
if (leaf_font_size is None) else leaf_font_size)
if leaf_rotation is not None:
ax.set_yticklabels(ivl, rotation=leaf_rotation, size=leaf_font)
else:
ax.set_yticklabels(ivl, size=leaf_font)
# Let's use collections instead. This way there is a separate legend item
# for each tree grouping, rather than stupidly one for each line segment.
colors_used = _remove_dups(color_list)
color_to_lines = {}
for color in colors_used:
color_to_lines[color] = []
for (xline, yline, color) in zip(xlines, ylines, color_list):
color_to_lines[color].append(list(zip(xline, yline)))
colors_to_collections = {}
# Construct the collections.
for color in colors_used:
coll = matplotlib.collections.LineCollection(color_to_lines[color],
colors=(color,))
colors_to_collections[color] = coll
# Add all the groupings below the color threshold.
for color in colors_used:
if color != above_threshold_color:
ax.add_collection(colors_to_collections[color])
# If there's a grouping of links above the color threshold, it goes last.
if above_threshold_color in colors_to_collections:
ax.add_collection(colors_to_collections[above_threshold_color])
if contraction_marks is not None:
Ellipse = matplotlib.patches.Ellipse
for (x, y) in contraction_marks:
if orientation in ('left', 'right'):
e = Ellipse((y, x), width=dvw / 100, height=1.0)
else:
e = Ellipse((x, y), width=1.0, height=dvw / 100)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if trigger_redraw:
matplotlib.pylab.draw_if_interactive()
# C0 is used for the above-threshold color
_link_line_colors_default = ('C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9')
_link_line_colors = list(_link_line_colors_default)
def set_link_color_palette(palette):
"""
Set list of matplotlib color codes for use by dendrogram.
Note that this palette is global (i.e., setting it once changes the colors
for all subsequent calls to `dendrogram`) and that it affects only the
    colors below ``color_threshold``.
Note that `dendrogram` also accepts a custom coloring function through its
``link_color_func`` keyword, which is more flexible and non-global.
Parameters
----------
palette : list of str or None
A list of matplotlib color codes. The order of the color codes is the
order in which the colors are cycled through when color thresholding in
the dendrogram.
If ``None``, resets the palette to its default (which are matplotlib
default colors C1 to C9).
Returns
-------
None
See Also
--------
dendrogram
Notes
-----
Ability to reset the palette with ``None`` added in SciPy 0.17.0.
Examples
--------
>>> from scipy.cluster import hierarchy
>>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,
... 400., 754., 564., 138., 219., 869., 669.])
>>> Z = hierarchy.linkage(ytdist, 'single')
>>> dn = hierarchy.dendrogram(Z, no_plot=True)
>>> dn['color_list']
['C1', 'C0', 'C0', 'C0', 'C0']
>>> hierarchy.set_link_color_palette(['c', 'm', 'y', 'k'])
>>> dn = hierarchy.dendrogram(Z, no_plot=True, above_threshold_color='b')
>>> dn['color_list']
['c', 'b', 'b', 'b', 'b']
>>> dn = hierarchy.dendrogram(Z, no_plot=True, color_threshold=267,
... above_threshold_color='k')
>>> dn['color_list']
['c', 'm', 'm', 'k', 'k']
Now, reset the color palette to its default:
>>> hierarchy.set_link_color_palette(None)
"""
if palette is None:
# reset to its default
palette = _link_line_colors_default
elif type(palette) not in (list, tuple):
raise TypeError("palette must be a list or tuple")
_ptypes = [isinstance(p, str) for p in palette]
if False in _ptypes:
raise TypeError("all palette list elements must be color strings")
global _link_line_colors
_link_line_colors = palette
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
get_leaves=True, orientation='top', labels=None,
count_sort=False, distance_sort=False, show_leaf_counts=True,
no_plot=False, no_labels=False, leaf_font_size=None,
leaf_rotation=None, leaf_label_func=None,
show_contracted=False, link_color_func=None, ax=None,
above_threshold_color='C0'):
"""
Plot the hierarchical clustering as a dendrogram.
The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The top of the U-link indicates a
cluster merge. The two legs of the U-link indicate which clusters
were merged. The length of the two legs of the U-link represents
the distance between the child clusters. It is also the
cophenetic distance between original observations in the two
children clusters.
Parameters
----------
Z : ndarray
The linkage matrix encoding the hierarchical clustering to
render as a dendrogram. See the ``linkage`` function for more
information on the format of ``Z``.
p : int, optional
The ``p`` parameter for ``truncate_mode``.
truncate_mode : str, optional
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived is
large. Truncation is used to condense the dendrogram. There
are several modes:
``None``
No truncation is performed (default).
Note: ``'none'`` is an alias for ``None`` that's kept for
backward compatibility.
``'lastp'``
The last ``p`` non-singleton clusters formed in the linkage are the
only non-leaf nodes in the linkage; they correspond to rows
``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
contracted into leaf nodes.
``'level'``
No more than ``p`` levels of the dendrogram tree are displayed.
A "level" includes all nodes with ``p`` merges from the final merge.
Note: ``'mtica'`` is an alias for ``'level'`` that's kept for
backward compatibility.
color_threshold : double, optional
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
:math:`k` the same color if :math:`k` is the first node below
the cut threshold :math:`t`. All links connecting nodes with
distances greater than or equal to the threshold are colored
        with the default matplotlib color ``'C0'``. If :math:`t` is less
than or equal to zero, all nodes are colored ``'C0'``.
If ``color_threshold`` is None or 'default',
corresponding with MATLAB(TM) behavior, the threshold is set to
``0.7*max(Z[:,2])``.
get_leaves : bool, optional
Includes a list ``R['leaves']=H`` in the result
dictionary. For each :math:`i`, ``H[i] == j``, cluster node
``j`` appears in position ``i`` in the left-to-right traversal
of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
orientation : str, optional
The direction to plot the dendrogram, which can be any
of the following strings:
``'top'``
          Plots the root at the top, and plots descendent links going
          downwards (default).
        ``'bottom'``
          Plots the root at the bottom, and plots descendent links going
          upwards.
        ``'left'``
          Plots the root at the left, and plots descendent links going right.
        ``'right'``
          Plots the root at the right, and plots descendent links going left.
labels : ndarray, optional
By default, ``labels`` is None so the index of the original observation
is used to label the leaf nodes. Otherwise, this is an :math:`n`-sized
sequence, with ``n == Z.shape[0] + 1``. The ``labels[i]`` value is the
text to put under the :math:`i` th leaf node only if it corresponds to
an original observation and not a non-singleton cluster.
count_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) in which
        n's two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum number of original objects in its cluster
is plotted first.
``'descending'``
The child with the maximum number of original objects in its cluster
is plotted first.
Note, ``distance_sort`` and ``count_sort`` cannot both be True.
distance_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) in which
        n's two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum distance between its direct descendents is
plotted first.
``'descending'``
The child with the maximum distance between its direct descendents is
plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
show_leaf_counts : bool, optional
When True, leaf nodes representing :math:`k>1` original
observation are labeled with the number of observations they
contain in parentheses.
no_plot : bool, optional
When True, the final rendering is not performed. This is
useful if only the data structures computed for the rendering
are needed or if matplotlib is not available.
no_labels : bool, optional
When True, no labels appear next to the leaf nodes in the
rendering of the dendrogram.
leaf_rotation : double, optional
Specifies the angle (in degrees) to rotate the leaf
labels. When unspecified, the rotation is based on the number of
nodes in the dendrogram (default is 0).
leaf_font_size : int, optional
Specifies the font size (in points) of the leaf labels. When
        unspecified, the size is based on the number of nodes in the
dendrogram.
leaf_label_func : lambda or function, optional
        When ``leaf_label_func`` is a callable function, it is called for
        each leaf with cluster index :math:`k < 2n-1`. The function
is expected to return a string with the label for the
leaf.
Indices :math:`k < n` correspond to original observations
while indices :math:`k \\geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
non-singletons with their id, count, and inconsistency
coefficient, simply do::
# First define the leaf label function.
def llf(id):
if id < n:
return str(id)
else:
                    return '[%d %d %1.2f]' % (id, count, R[id-n, 3])
# The text for the leaf nodes is going to be big so force
# a rotation of 90 degrees.
dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
# leaf_label_func can also be used together with ``truncate_mode`` parameter,
# in which case you will get your leaves labeled after truncation:
dendrogram(Z, leaf_label_func=llf, leaf_rotation=90,
truncate_mode='level', p=2)
show_contracted : bool, optional
When True the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
connecting that leaf node. This really is only useful when
truncation is used (see ``truncate_mode`` parameter).
link_color_func : callable, optional
        If given, `link_color_func` is called with each non-singleton id
corresponding to each U-shaped link it will paint. The function is
expected to return the color to paint the link, encoded as a matplotlib
color string code. For example::
dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
ax : matplotlib Axes instance, optional
If None and `no_plot` is not True, the dendrogram will be plotted
on the current axes. Otherwise if `no_plot` is not True the
dendrogram will be plotted on the given ``Axes`` instance. This can be
useful if the dendrogram is part of a more complex figure.
above_threshold_color : str, optional
This matplotlib color string sets the color of the links above the
color_threshold. The default is ``'C0'``.
Returns
-------
R : dict
A dictionary of data structures computed to render the
        dendrogram. It has the following keys:
``'color_list'``
A list of color names. The k'th element represents the color of the
k'th link.
``'icoord'`` and ``'dcoord'``
Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.
``'ivl'``
A list of labels corresponding to the leaf nodes.
``'leaves'``
For each i, ``H[i] == j``, cluster node ``j`` appears in position
``i`` in the left-to-right traversal of the leaves, where
:math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
``i``-th leaf node corresponds to an original observation.
Otherwise, it corresponds to a non-singleton cluster.
``'leaves_color_list'``
A list of color names. The k'th element represents the color of the
k'th leaf.
See Also
--------
linkage, set_link_color_palette
Notes
-----
It is expected that the distances in ``Z[:,2]`` be monotonic, otherwise
crossings appear in the dendrogram.
Examples
--------
>>> from scipy.cluster import hierarchy
>>> import matplotlib.pyplot as plt
A very basic example:
>>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,
... 400., 754., 564., 138., 219., 869., 669.])
>>> Z = hierarchy.linkage(ytdist, 'single')
>>> plt.figure()
>>> dn = hierarchy.dendrogram(Z)
Now, plot in given axes, improve the color scheme and use both vertical and
horizontal orientations:
>>> hierarchy.set_link_color_palette(['m', 'c', 'y', 'k'])
>>> fig, axes = plt.subplots(1, 2, figsize=(8, 3))
>>> dn1 = hierarchy.dendrogram(Z, ax=axes[0], above_threshold_color='y',
... orientation='top')
>>> dn2 = hierarchy.dendrogram(Z, ax=axes[1],
... above_threshold_color='#bcbddc',
... orientation='right')
>>> hierarchy.set_link_color_palette(None) # reset to default after use
>>> plt.show()
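    For larger linkages, the dendrogram can also be truncated; for example,
    keeping only the last few merges (computed here without plotting):
    >>> dn3 = hierarchy.dendrogram(Z, no_plot=True,
    ...                            truncate_mode='lastp', p=3)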
"""
# This feature was thought about but never implemented (still useful?):
#
# ... = dendrogram(..., leaves_order=None)
#
# Plots the leaves in the order specified by a vector of
# original observation indices. If the vector contains duplicates
# or results in a crossing, an exception will be thrown. Passing
# None orders leaf nodes based on the order they appear in the
# pre-order traversal.
Z = np.asarray(Z, order='c')
if orientation not in ["top", "left", "bottom", "right"]:
raise ValueError("orientation must be one of 'top', 'left', "
"'bottom', or 'right'")
if labels is not None and Z.shape[0] + 1 != len(labels):
raise ValueError("Dimensions of Z and labels must be consistent.")
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
if type(p) in (int, float):
p = int(p)
else:
raise TypeError('The second argument must be a number')
if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
# 'mlab' and 'mtica' are kept working for backwards compat.
raise ValueError('Invalid truncation mode.')
if truncate_mode == 'lastp' or truncate_mode == 'mlab':
if p > n or p == 0:
p = n
if truncate_mode == 'mtica':
# 'mtica' is an alias
truncate_mode = 'level'
if truncate_mode == 'level':
if p <= 0:
p = np.inf
if get_leaves:
lvs = []
else:
lvs = None
icoord_list = []
dcoord_list = []
color_list = []
current_color = [0]
currently_below_threshold = [False]
ivl = [] # list of leaves
if color_threshold is None or (isinstance(color_threshold, str) and
color_threshold == 'default'):
color_threshold = max(Z[:, 2]) * 0.7
R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
'leaves': lvs, 'color_list': color_list}
# Empty list will be filled in _dendrogram_calculate_info
contraction_marks = [] if show_contracted else None
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=2*n - 2,
iv=0.0,
ivl=ivl,
n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list,
lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
if not no_plot:
mh = max(Z[:, 2])
_plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
no_labels, color_list,
leaf_font_size=leaf_font_size,
leaf_rotation=leaf_rotation,
contraction_marks=contraction_marks,
ax=ax,
above_threshold_color=above_threshold_color)
R["leaves_color_list"] = _get_leaves_color_list(R)
return R
def _get_leaves_color_list(R):
leaves_color_list = [None] * len(R['leaves'])
for link_x, link_y, link_color in zip(R['icoord'],
R['dcoord'],
R['color_list']):
for (xi, yi) in zip(link_x, link_y):
if yi == 0.0: # if yi is 0.0, the point is a leaf
# xi of leaves are 5, 15, 25, 35, ... (see `iv_ticks`)
# index of leaves are 0, 1, 2, 3, ... as below
leaf_index = (int(xi) - 5) // 10
                # each leaf has the same color as its link.
leaves_color_list[leaf_index] = link_color
return leaves_color_list
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
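                # Note: i < n for a singleton leaf, so labels[i - n] is the
                # same element as labels[i] via negative indexing, since
                # len(labels) == n.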
ivl.append(labels[int(i - n)])
else:
                # Otherwise, use the id as the label for the leaf.
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts):
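    # Append a leaf that stands for a collapsed (non-singleton) cluster: its
    # label comes from leaf_label_func if given, otherwise it is the cluster
    # size in parentheses (or an empty string when show_leaf_counts is False).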
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(int(Z[i - n, 3])) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
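    # Record (iv, height) pairs for every non-singleton descendant of cluster
    # i so that they can be drawn as contraction marks along the link placed
    # at position iv.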
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
if i >= n:
contraction_marks.append((iv, Z[i - n, 2]))
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _dendrogram_calculate_info(Z, p, truncate_mode,
color_threshold=np.inf, get_leaves=True,
orientation='top', labels=None,
count_sort=False, distance_sort=False,
show_leaf_counts=False, i=-1, iv=0.0,
ivl=[], n=0, icoord_list=[], dcoord_list=[],
lvs=None, mhr=False,
current_color=[], color_list=[],
currently_below_threshold=[],
leaf_label_func=None, level=0,
contraction_marks=None,
link_color_func=None,
above_threshold_color='C0'):
"""
Calculate the endpoints of the links as well as the labels for the
the dendrogram rooted at the node with index i. iv is the independent
variable value to plot the left-most leaf node below the root node i
(if orientation='top', this would be the left-most x value where the
plotting of this root node i and its descendents should begin).
ivl is a list to store the labels of the leaf nodes. The leaf_label_func
is called whenever ivl != None, labels == None, and
leaf_label_func != None. When ivl != None and labels != None, the
labels list is used only for labeling the leaf nodes. When
ivl == None, no labels are generated for leaf nodes.
When get_leaves==True, a list of leaves is built as they are visited
in the dendrogram.
Returns a tuple with l being the independent variable coordinate that
corresponds to the midpoint of cluster to the left of cluster i if
i is non-singleton, otherwise the independent coordinate of the leaf
node if i is a leaf node.
Returns
-------
A tuple (left, w, h, md), where:
* left is the independent variable coordinate of the center of the
the U of the subtree
* w is the amount of space used for the subtree (in independent
variable units)
* h is the height of the subtree in dependent variable units
* md is the ``max(Z[*,2]``) for all nodes ``*`` below and including
the target node.
"""
if n == 0:
raise ValueError("Invalid singleton cluster count n.")
if i == -1:
raise ValueError("Invalid root cluster index i.")
if truncate_mode == 'lastp':
# If the node is a leaf node but corresponds to a non-singleton
# cluster, its label is either the empty string or the number of
# original observations belonging to cluster i.
if 2*n - p > i >= n:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode == 'level':
if i > n and level > p:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mlab',):
msg = "Mode 'mlab' is deprecated in scipy 0.19.0 (it never worked)."
warnings.warn(msg, DeprecationWarning)
# Otherwise, only truncate if we have a leaf node.
#
# Only place leaves if they correspond to original observations.
if i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# !!! Otherwise, we don't have a leaf node, so work on plotting a
# non-leaf node.
# Actual indices of a and b
aa = int(Z[i - n, 0])
ab = int(Z[i - n, 1])
if aa >= n:
# The number of singletons below cluster a
na = Z[aa - n, 3]
# The distance between a's two direct children.
da = Z[aa - n, 2]
else:
na = 1
da = 0.0
if ab >= n:
nb = Z[ab - n, 3]
db = Z[ab - n, 2]
else:
nb = 1
db = 0.0
if count_sort == 'ascending' or count_sort == True:
# If a has a count greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if na > nb:
# The cluster index to draw to the left (ua) will be ab
# and the one to draw to the right (ub) will be aa
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif count_sort == 'descending':
# If a has a count less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if na > nb:
ua = aa
ub = ab
else:
ua = ab
ub = aa
elif distance_sort == 'ascending' or distance_sort == True:
# If a has a distance greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if da > db:
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif distance_sort == 'descending':
# If a has a distance less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if da > db:
ua = aa
ub = ab
else:
ua = ab
ub = aa
else:
ua = aa
ub = ab
# Updated iv variable and the amount of space used.
(uiva, uwa, uah, uamd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ua, iv=iv, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
h = Z[i - n, 2]
if h >= color_threshold or color_threshold <= 0:
c = above_threshold_color
if currently_below_threshold[0]:
current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
currently_below_threshold[0] = False
else:
currently_below_threshold[0] = True
c = _link_line_colors[current_color[0]]
(uivb, uwb, ubh, ubmd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ub, iv=iv + uwa, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
max_dist = max(uamd, ubmd, h)
icoord_list.append([uiva, uiva, uivb, uivb])
dcoord_list.append([uah, h, h, ubh])
if link_color_func is not None:
v = link_color_func(int(i))
if not isinstance(v, str):
raise TypeError("link_color_func must return a matplotlib "
"color string!")
color_list.append(v)
else:
color_list.append(c)
return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
def is_isomorphic(T1, T2):
"""
Determine if two different cluster assignments are equivalent.
Parameters
----------
T1 : array_like
An assignment of singleton cluster ids to flat cluster ids.
T2 : array_like
An assignment of singleton cluster ids to flat cluster ids.
Returns
-------
b : bool
Whether the flat cluster assignments `T1` and `T2` are
equivalent.
See Also
--------
linkage : for a description of what a linkage matrix is.
fcluster : for the creation of flat cluster assignments.
Examples
--------
>>> from scipy.cluster.hierarchy import fcluster, is_isomorphic
>>> from scipy.cluster.hierarchy import single, complete
>>> from scipy.spatial.distance import pdist
Two flat cluster assignments can be isomorphic if they represent the same
cluster assignment, with different labels.
For example, we can use the `scipy.cluster.hierarchy.single`: method
and flatten the output to four clusters:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = single(pdist(X))
>>> T = fcluster(Z, 1, criterion='distance')
>>> T
array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)
We can then do the same using the
`scipy.cluster.hierarchy.complete`: method:
>>> Z = complete(pdist(X))
>>> T_ = fcluster(Z, 1.5, criterion='distance')
>>> T_
array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
As we can see, in both cases we obtain four clusters and all the data
points are distributed in the same way - the only thing that changes
    are the flat cluster labels (3 => 1, 4 => 2, 2 => 3 and 1 => 4), so both
cluster assignments are isomorphic:
>>> is_isomorphic(T, T_)
True
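    Two assignments that group the observations differently are not
    isomorphic; for example:
    >>> is_isomorphic([1, 1, 1], [2, 2, 3])
    False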
"""
T1 = np.asarray(T1, order='c')
T2 = np.asarray(T2, order='c')
if type(T1) != np.ndarray:
raise TypeError('T1 must be a numpy array.')
if type(T2) != np.ndarray:
raise TypeError('T2 must be a numpy array.')
T1S = T1.shape
T2S = T2.shape
if len(T1S) != 1:
raise ValueError('T1 must be one-dimensional.')
if len(T2S) != 1:
raise ValueError('T2 must be one-dimensional.')
if T1S[0] != T2S[0]:
raise ValueError('T1 and T2 must have the same number of elements.')
n = T1S[0]
d1 = {}
d2 = {}
for i in range(0, n):
if T1[i] in d1:
if not T2[i] in d2:
return False
if d1[T1[i]] != T2[i] or d2[T2[i]] != T1[i]:
return False
elif T2[i] in d2:
return False
else:
d1[T1[i]] = T2[i]
d2[T2[i]] = T1[i]
return True
def maxdists(Z):
"""
    Return the maximum distance for each non-singleton cluster.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
Returns
-------
maxdists : ndarray
A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
the maximum distance between any cluster (including
singletons) below and including the node with index i. More
specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
set of all node indices below and including node i.
See Also
--------
linkage : for a description of what a linkage matrix is.
is_monotonic : for testing for monotonicity of a linkage matrix.
Examples
--------
>>> from scipy.cluster.hierarchy import median, maxdists
>>> from scipy.spatial.distance import pdist
Given a linkage matrix ``Z``, `scipy.cluster.hierarchy.maxdists`
computes for each new cluster generated (i.e., for each row of the linkage
matrix) what is the maximum distance between any two child clusters.
Due to the nature of hierarchical clustering, in many cases this is going
to be just the distance between the two child clusters that were merged
to form the current one - that is, Z[:,2].
However, for non-monotonic cluster assignments such as
`scipy.cluster.hierarchy.median` clustering this is not always the
    case: There may be cluster formations where the distance between the two
clusters merged is smaller than the distance between their children.
We can see this in an example:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = median(pdist(X))
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 2. , 12. , 1.11803399, 3. ],
[ 5. , 13. , 1.11803399, 3. ],
[ 8. , 15. , 1.11803399, 3. ],
[11. , 14. , 1.11803399, 3. ],
[18. , 19. , 3. , 6. ],
[16. , 17. , 3.5 , 6. ],
[20. , 21. , 3.25 , 12. ]])
>>> maxdists(Z)
array([1. , 1. , 1. , 1. , 1.11803399,
1.11803399, 1.11803399, 1.11803399, 3. , 3.5 ,
3.5 ])
Note that while the distance between the two clusters merged when creating the
last cluster is 3.25, there are two children (clusters 16 and 17) whose distance
is larger (3.5). Thus, `scipy.cluster.hierarchy.maxdists` returns 3.5 in
this case.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
MD = np.zeros((n - 1,))
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
return MD
def maxinconsts(Z, R):
"""
Return the maximum inconsistency coefficient for each
non-singleton cluster and its children.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
`linkage` for more information.
R : ndarray
The inconsistency matrix.
Returns
-------
MI : ndarray
A monotonic ``(n-1)``-sized numpy array of doubles.
See Also
--------
linkage : for a description of what a linkage matrix is.
    inconsistent : for the creation of an inconsistency matrix.
Examples
--------
>>> from scipy.cluster.hierarchy import median, inconsistent, maxinconsts
>>> from scipy.spatial.distance import pdist
Given a data set ``X``, we can apply a clustering method to obtain a
linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
be also used to obtain the inconsistency matrix ``R`` associated to
this clustering process:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = median(pdist(X))
>>> R = inconsistent(Z)
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 2. , 12. , 1.11803399, 3. ],
[ 5. , 13. , 1.11803399, 3. ],
[ 8. , 15. , 1.11803399, 3. ],
[11. , 14. , 1.11803399, 3. ],
[18. , 19. , 3. , 6. ],
[16. , 17. , 3.5 , 6. ],
[20. , 21. , 3.25 , 12. ]])
>>> R
array([[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.74535599, 1.08655358, 3. , 1.15470054],
[1.91202266, 1.37522872, 3. , 1.15470054],
[3.25 , 0.25 , 3. , 0. ]])
Here, `scipy.cluster.hierarchy.maxinconsts` can be used to compute
the maximum value of the inconsistency statistic (the last column of
``R``) for each non-singleton cluster and its children:
>>> maxinconsts(Z, R)
array([0. , 0. , 0. , 0. , 0.70710678,
0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054,
1.15470054])
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
n = Z.shape[0] + 1
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
MI = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
return MI
def maxRstat(Z, R, i):
"""
Return the maximum statistic for each non-singleton cluster and its
children.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See `linkage` for more
information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]``, where
``Q(j)`` the set of all node ids corresponding to nodes below
and including ``j``.
See Also
--------
linkage : for a description of what a linkage matrix is.
    inconsistent : for the creation of an inconsistency matrix.
Examples
--------
>>> from scipy.cluster.hierarchy import median, inconsistent, maxRstat
>>> from scipy.spatial.distance import pdist
Given a data set ``X``, we can apply a clustering method to obtain a
linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can
be also used to obtain the inconsistency matrix ``R`` associated to
this clustering process:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = median(pdist(X))
>>> R = inconsistent(Z)
>>> R
array([[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1. , 0. , 1. , 0. ],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.05901699, 0.08346263, 2. , 0.70710678],
[1.74535599, 1.08655358, 3. , 1.15470054],
[1.91202266, 1.37522872, 3. , 1.15470054],
[3.25 , 0.25 , 3. , 0. ]])
`scipy.cluster.hierarchy.maxRstat` can be used to compute
the maximum value of each column of ``R``, for each non-singleton
cluster and its children:
>>> maxRstat(Z, R, 0)
array([1. , 1. , 1. , 1. , 1.05901699,
1.05901699, 1.05901699, 1.05901699, 1.74535599, 1.91202266,
3.25 ])
>>> maxRstat(Z, R, 1)
array([0. , 0. , 0. , 0. , 0.08346263,
0.08346263, 0.08346263, 0.08346263, 1.08655358, 1.37522872,
1.37522872])
>>> maxRstat(Z, R, 3)
array([0. , 0. , 0. , 0. , 0.70710678,
0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054,
1.15470054])
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not int:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
return MR
def leaders(Z, T):
"""
Return the root nodes in a hierarchical clustering.
Returns the root nodes in a hierarchical clustering corresponding
to a cut defined by a flat cluster assignment vector ``T``. See
the ``fcluster`` function for more information on the format of ``T``.
For each flat cluster :math:`j` of the :math:`k` flat clusters
represented in the n-sized flat cluster assignment vector ``T``,
this function finds the lowest cluster node :math:`i` in the linkage
tree Z, such that:
* leaf descendants belong only to flat cluster j
(i.e., ``T[p]==j`` for all :math:`p` in :math:`S(i)`, where
      :math:`S(i)` is the set of leaf ids of descendant leaf nodes
      of cluster node :math:`i`)
    * there does not exist a leaf that is not a descendant of
      :math:`i` that also belongs to cluster :math:`j`
(i.e., ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
this condition is violated, ``T`` is not a valid cluster
assignment vector, and an exception will be thrown.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
`linkage` for more information.
T : ndarray
The flat cluster assignment vector.
Returns
-------
L : ndarray
The leader linkage node id's stored as a k-element 1-D array,
where ``k`` is the number of flat clusters found in ``T``.
``L[j]=i`` is the linkage cluster node id that is the
leader of flat cluster with id M[j]. If ``i < n``, ``i``
corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
M : ndarray
The leader linkage node id's stored as a k-element 1-D array, where
``k`` is the number of flat clusters found in ``T``. This allows the
set of flat cluster ids to be any arbitrary set of ``k`` integers.
For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
id 8's leader is linkage node 2.
See Also
--------
fcluster : for the creation of flat cluster assignments.
Examples
--------
>>> from scipy.cluster.hierarchy import ward, fcluster, leaders
>>> from scipy.spatial.distance import pdist
    Given a linkage matrix ``Z`` - obtained after applying a clustering method
to a dataset ``X`` - and a flat cluster assignment array ``T``:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> Z = ward(pdist(X))
>>> Z
array([[ 0. , 1. , 1. , 2. ],
[ 3. , 4. , 1. , 2. ],
[ 6. , 7. , 1. , 2. ],
[ 9. , 10. , 1. , 2. ],
[ 2. , 12. , 1.29099445, 3. ],
[ 5. , 13. , 1.29099445, 3. ],
[ 8. , 14. , 1.29099445, 3. ],
[11. , 15. , 1.29099445, 3. ],
[16. , 17. , 5.77350269, 6. ],
[18. , 19. , 5.77350269, 6. ],
[20. , 21. , 8.16496581, 12. ]])
>>> T = fcluster(Z, 3, criterion='distance')
>>> T
array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)
`scipy.cluster.hierarchy.leaders` returns the indices of the nodes
in the dendrogram that are the leaders of each flat cluster:
>>> L, M = leaders(Z, T)
>>> L
array([16, 17, 18, 19], dtype=int32)
(remember that indices 0-11 point to the 12 data points in ``X``,
whereas indices 12-22 point to the 11 rows of ``Z``)
`scipy.cluster.hierarchy.leaders` also returns the indices of
the flat clusters in ``T``:
>>> M
array([1, 2, 3, 4], dtype=int32)
"""
Z = np.asarray(Z, order='c')
T = np.asarray(T, order='c')
if type(T) != np.ndarray or T.dtype != 'i':
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype='i')
M = np.zeros((kk,), dtype='i')
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _hierarchy.leaders(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError(('T is not a valid assignment vector. Error found '
'when examining linkage node %d (< 2n-1).') % s)
return (L, M)
| bsd-3-clause |
nbdanielson/sima | examples/spikeinference.py | 6 | 4351 | from __future__ import division
from __future__ import print_function
from builtins import str
from builtins import range
from scipy import signal
from scipy.stats import uniform, norm
import numpy as np
import seaborn as sns
import matplotlib.mlab as ml
import matplotlib.pyplot as plt
from sima import spikes
#########
# PART 1: Make model calcium data
#########
# Data parameters
RATE = 1 # mean firing rate of poisson spike train (Hz)
STEPS = 5000 # number of time steps in data
TAU = 0.6 # time constant of calcium indicator (seconds)
DELTAT = 1 / 30 # time step duration (seconds)
SIGMA = 0.1 # standard deviation of gaussian noise
SEED = 2222 # random number generator seed
NTRACE = 5 # number of data traces to generate
# Make Poisson spike trains
SPIKES = [spikes.get_poisson_spikes(deltat=DELTAT, rate=RATE,
steps=STEPS, seed=SEED + i)
for i in range(NTRACE)]
SPIKES = np.asarray(SPIKES)
# Convolve with kernel to make calcium signal
np.random.seed(SEED)
GAMMA = 1 - (DELTAT / TAU)
CALCIUM = signal.lfilter([1], [1, -GAMMA], SPIKES)
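# Illustration (not part of the original example): the lfilter call above
# implements the first-order autoregressive calcium model
#   CALCIUM[i, t] = GAMMA * CALCIUM[i, t - 1] + SPIKES[i, t]
# The explicit recursion below reproduces it and should agree to floating-point
# precision.
CALCIUM_CHECK = np.zeros_like(SPIKES, dtype=float)
CALCIUM_CHECK[:, 0] = SPIKES[:, 0]
for t in range(1, STEPS):
    CALCIUM_CHECK[:, t] = GAMMA * CALCIUM_CHECK[:, t - 1] + SPIKES[:, t]
assert np.allclose(CALCIUM, CALCIUM_CHECK)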
TIME = np.linspace(0, STEPS * DELTAT, STEPS)
# Make fluorescence traces with random gaussian noise and baseline
FLUORS = [CALCIUM[i, ] + norm.rvs(scale=SIGMA, size=STEPS) + uniform.rvs()
for i in range(NTRACE)]
FLUORS = np.asarray(FLUORS)
#########
# PART 2: Estimate model parameters and perform spike inference
#########
# Perform spike inference on all simulated fluorescence traces
INFERENCE = np.zeros([STEPS, NTRACE])
FITS = np.zeros([STEPS, NTRACE])
# Jointly estimate gamma on traces concatenated together
[joint_gamma_est, joint_sigma_est] = spikes.estimate_parameters(
FLUORS.reshape(FLUORS.size), mode="correct")
for x in range(NTRACE):
# Estimate noise and decay parameters
[gamma_est, sigma_est] = spikes.estimate_parameters(
FLUORS[x, ], mode="correct", gamma=joint_gamma_est)
print("tau = {tau}, sigma = {sigma}".format(
tau=DELTAT / (1 - gamma_est), sigma=sigma_est))
# Run spike inference
INFERENCE[:, x], FITS[:, x], params = spikes.spike_inference(
FLUORS[x, ], sigma=sigma_est, gamma=joint_gamma_est, verbose=True)
#########
# PART 3: Plot results
#########
# Close all open figures
plt.close("all")
# Set up plotting style
sns.set(context="talk", rc={"figure.figsize": [20, 6]}, style="white")
sns.set_palette("muted", desat=.6)
tck = [0, .5, 1]
f, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, facecolor='w')
# Which cell to plot in first figure
cp = 0
# Plot the simulated data and model fit from the first result
plt.axes(ax1)
sns.tsplot(FLUORS[cp, ], ax=ax1, time=TIME)
sns.tsplot(FITS[:, cp], ax=ax1, time=TIME, color="red")
ax1.set_ylabel("Data and Fit")
plt.yticks(
np.round([FLUORS[cp].min(), FLUORS[cp].mean(), FLUORS[cp].max()], 1))
# Plot the true spike train
plt.axes(ax2)
plt.bar(TIME, SPIKES[cp, ], color="DimGray", width=DELTAT)
ax2.set_ylabel("True Spikes")
plt.yticks(tck)
plt.ylim(-.1, 1.1)
# Get true positives and false positives
spike_cutoff = 0.1
i_times = ml.find(INFERENCE[:, cp] > spike_cutoff) # inferred spikes
t_times = ml.find(SPIKES[cp, :]) # true spikes
sInds = np.intersect1d(i_times, t_times) # indices of true positives
wInds = np.setdiff1d(i_times, t_times) # indices of false positives
tp = float(sInds.size) / float(i_times.size) # true positive rate
fp = float(wInds.size) / \
(STEPS - float(t_times.size)) # false positive rate
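# Worked illustration of the set logic above (hypothetical numbers): if the
# inferred spike bins were {3, 7, 9} and the true spike bins {3, 9, 12}, then
# sInds = {3, 9} (true positives) and wInds = {7} (false positives), giving
# tp = 2/3 and fp = 1/(STEPS - 3).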
# Plot the spike inference
plt.axes(ax3)
plt.bar(
TIME[sInds], np.ones(sInds.size),
color="LightGrey", edgecolor="LightGrey", width=DELTAT)
plt.bar(
TIME[wInds], np.ones(wInds.size),
color="Red", edgecolor="Red", width=DELTAT)
plt.bar(
    TIME, INFERENCE[:, cp] / INFERENCE[:, cp].max(),
color="DimGray", edgecolor="DimGray", width=DELTAT)
ax3.set_xlabel("Time (Seconds)")
ax3.set_ylabel("Spike Inference")
sns.despine(bottom=True, left=True)
plt.yticks(tck)
plt.ylim(-.1, 1.1)
plt.title(
"TP rate = " + str(round(tp, 2)) + "; FP rate = " + str(round(fp, 2)))
# Plot all traces and inference
plt.figure(5, facecolor='w')
plt.subplot(211)
plt.imshow(FLUORS, aspect="auto", interpolation="none")
plt.colorbar()
plt.subplot(212)
plt.imshow(INFERENCE.transpose(), aspect="auto", interpolation="none")
plt.colorbar()
plt.show()
| gpl-2.0 |
wackymaster/QTClock | Libraries/matplotlib/backends/qt_compat.py | 4 | 6635 | """ A Qt API selector that can be used to switch between PyQt and PySide.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import os
import sys
from matplotlib import rcParams, verbose
# Available APIs.
QT_API_PYQT = 'PyQt4' # API is not set here; Python 2.x default is V 1
QT_API_PYQTv2 = 'PyQt4v2' # forced to Version 2 API
QT_API_PYSIDE = 'PySide' # only supports Version 2 API
QT_API_PYQT5 = 'PyQt5' # use PyQt5 API; Version 2 with module shim
ETS = dict(pyqt=(QT_API_PYQTv2, 4), pyside=(QT_API_PYSIDE, 4),
pyqt5=(QT_API_PYQT5, 5))
# ETS is a dict of env variable to (QT_API, QT_MAJOR_VERSION)
# If the ETS QT_API environment variable is set, use it, but only
# if the variable is of the same major Qt version. Note that
# ETS requires version 2 of the PyQt4 API, which is not the platform
# default for Python 2.x.
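# For example (illustration): setting QT_API=pyqt5 in the environment maps to
# ETS['pyqt5'] == (QT_API_PYQT5, 5), i.e. the PyQt5 binding with Qt major
# version 5; it is only honoured below when the Qt major version implied by the
# rcParams backend is also 5.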
QT_API_ENV = os.environ.get('QT_API')
if rcParams['backend'] == 'Qt5Agg':
QT_RC_MAJOR_VERSION = 5
elif rcParams['backend'] == 'Qt4Agg':
QT_RC_MAJOR_VERSION = 4
else:
    # A different backend was specified, but we still got here because a
    # Qt-related file was imported. This is allowed, so let's try to guess
    # what we should be using.
if "PyQt4" in sys.modules or "PySide" in sys.modules:
# PyQt4 or PySide is actually used.
QT_RC_MAJOR_VERSION = 4
else:
# This is a fallback: PyQt5
QT_RC_MAJOR_VERSION = 5
QT_API = None
if (QT_API_ENV is not None):
try:
QT_ENV_MAJOR_VERSION = ETS[QT_API_ENV][1]
except KeyError:
raise RuntimeError(
('Unrecognized environment variable %r, valid values are:'
' %r, %r or %r' % (QT_API_ENV, 'pyqt', 'pyside', 'pyqt5')))
if QT_ENV_MAJOR_VERSION == QT_RC_MAJOR_VERSION:
# Only if backend and env qt major version are
# compatible use the env variable.
QT_API = ETS[QT_API_ENV][0]
if QT_API is None:
# No ETS environment or incompatible so use rcParams.
if rcParams['backend'] == 'Qt5Agg':
QT_API = rcParams['backend.qt5']
elif rcParams['backend'] == 'Qt4Agg':
QT_API = rcParams['backend.qt4']
else:
        # A different backend was specified, but we still got here because a
        # Qt-related file was imported. This is allowed, so let's try to guess
        # what we should be using.
if "PyQt4" in sys.modules or "PySide" in sys.modules:
# PyQt4 or PySide is actually used.
QT_API = rcParams['backend.qt4']
else:
# This is a fallback: PyQt5
QT_API = rcParams['backend.qt5']
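# Example of the resolution above (illustration): with backend 'Qt4Agg' and
# QT_API=pyqt5 in the environment, the major versions (4 vs. 5) do not match,
# so the environment variable is ignored and rcParams['backend.qt4'] decides.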
# We will define an appropriate wrapper for the differing versions
# of file dialog.
_getSaveFileName = None
# Flag to check if sip could be imported
_sip_imported = False
# Now perform the imports.
if QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYQT5):
try:
import sip
_sip_imported = True
except ImportError:
# Try using PySide
QT_API = QT_API_PYSIDE
cond = ("Could not import sip; falling back on PySide\n"
"in place of PyQt4 or PyQt5.\n")
verbose.report(cond, 'helpful')
if _sip_imported:
if QT_API == QT_API_PYQTv2:
if QT_API_ENV == 'pyqt':
cond = ("Found 'QT_API=pyqt' environment variable. "
"Setting PyQt4 API accordingly.\n")
else:
cond = "PyQt API v2 specified."
try:
sip.setapi('QString', 2)
except:
res = 'QString API v2 specification failed. Defaulting to v1.'
verbose.report(cond + res, 'helpful')
# condition has now been reported, no need to repeat it:
cond = ""
try:
sip.setapi('QVariant', 2)
except:
res = 'QVariant API v2 specification failed. Defaulting to v1.'
verbose.report(cond + res, 'helpful')
if QT_API == QT_API_PYQT5:
try:
from PyQt5 import QtCore, QtGui, QtWidgets
_getSaveFileName = QtWidgets.QFileDialog.getSaveFileName
except ImportError:
            # fell through: tried PyQt5 and failed, so fall back to PyQt4
QT_API = rcParams['backend.qt4']
QT_RC_MAJOR_VERSION = 4
    # This needs to be an `if` (not `elif`) so we can re-test the value of
    # QT_API, which may have been changed in the above block.
if QT_API in [QT_API_PYQT, QT_API_PYQTv2]: # PyQt4 API
from PyQt4 import QtCore, QtGui
try:
if sip.getapi("QString") > 1:
# Use new getSaveFileNameAndFilter()
_getSaveFileName = QtGui.QFileDialog.getSaveFileNameAndFilter
else:
# Use old getSaveFileName()
def _getSaveFileName(*args, **kwargs):
return (QtGui.QFileDialog.getSaveFileName(*args, **kwargs),
None)
except (AttributeError, KeyError):
# call to getapi() can fail in older versions of sip
def _getSaveFileName(*args, **kwargs):
return QtGui.QFileDialog.getSaveFileName(*args, **kwargs), None
try:
# Alias PyQt-specific functions for PySide compatibility.
QtCore.Signal = QtCore.pyqtSignal
try:
QtCore.Slot = QtCore.pyqtSlot
except AttributeError:
# Not a perfect match but works in simple cases
QtCore.Slot = QtCore.pyqtSignature
QtCore.Property = QtCore.pyqtProperty
__version__ = QtCore.PYQT_VERSION_STR
except NameError:
# QtCore did not get imported, fall back to pyside
QT_API = QT_API_PYSIDE
if QT_API == QT_API_PYSIDE: # try importing pyside
try:
from PySide import QtCore, QtGui, __version__, __version_info__
except ImportError:
raise ImportError(
"Matplotlib qt-based backends require an external PyQt4, PyQt5,\n"
"or PySide package to be installed, but it was not found.")
if __version_info__ < (1, 0, 3):
raise ImportError(
"Matplotlib backend_qt4 and backend_qt4agg require PySide >=1.0.3")
_getSaveFileName = QtGui.QFileDialog.getSaveFileName
# Apply shim to Qt4 APIs to make them look like Qt5
if QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYSIDE):
'''Import all used QtGui objects into QtWidgets
    Here I've opted to simply copy QtGui into QtWidgets as that
achieves the same result as copying over the objects, and will
continue to work if other objects are used.
'''
QtWidgets = QtGui
| mit |
synthicity/pandana | pandana/loaders/tests/test_pandash5.py | 1 | 4542 | import os
import tempfile
import pandas as pd
import pytest
import pandas.util.testing as pdt
from pandana import Network
from pandana.testing import skipiftravis
from pandana.loaders import pandash5 as ph5
@pytest.fixture(scope='module')
def nodes():
return pd.DataFrame(
{'x': [1, 2, 3, 4] * 3,
'y': [1] * 4 + [2] * 4 + [3] * 4})
@pytest.fixture(scope='module')
def edges():
return pd.DataFrame(
{'from': [0, 4, 5, 6, 2, 2, 6, 10, 9, 7],
'to': [4, 5, 6, 2, 1, 3, 10, 9, 8, 11]})
@pytest.fixture(scope='module')
def impedance_names():
return ['distance', 'time']
@pytest.fixture(scope='module')
def edge_weights(edges, impedance_names):
return pd.DataFrame(
{impedance_names[0]: [1] * len(edges),
impedance_names[1]: list(range(1, len(edges) + 1))})
@pytest.fixture(scope='module')
def two_way():
return True
@pytest.fixture(scope='module')
def network(nodes, edges, edge_weights, two_way):
return Network(
nodes['x'], nodes['y'], edges['from'], edges['to'], edge_weights,
two_way)
@pytest.fixture(scope='module')
def edges_df(edges, edge_weights):
return edges.join(edge_weights)
@pytest.fixture(scope='module')
def rm_nodes():
return [0, 7, 6]
@pytest.fixture
def tmpfile(request):
fname = tempfile.NamedTemporaryFile().name
def cleanup():
if os.path.exists(fname):
os.remove(fname)
request.addfinalizer(cleanup)
return fname
@skipiftravis
def test_remove_nodes(network, rm_nodes):
# node 0 is connected to node 4, which is in turn connected to node 5
# node 7 is connected to node 11, which has no other connections
# node 6 is connected to nodes 2, 5, and 10,
# which all have other connections
nodes, edges = ph5.remove_nodes(network, rm_nodes)
exp_nodes = pd.DataFrame(
{'x': [2, 3, 4, 1, 2, 1, 2, 3, 4],
'y': [1, 1, 1, 2, 2, 3, 3, 3, 3]},
index=[1, 2, 3, 4, 5, 8, 9, 10, 11])
exp_edges = pd.DataFrame(
{'from': [4, 2, 2, 10, 9],
'to': [5, 1, 3, 9, 8],
'distance': [1, 1, 1, 1, 1],
'time': [2, 5, 6, 8, 9]},
index=[1, 4, 5, 7, 8])
exp_edges = exp_edges[['from', 'to', 'distance', 'time']] # order columns
pdt.assert_frame_equal(nodes, exp_nodes)
pdt.assert_frame_equal(edges, exp_edges)
@skipiftravis
def test_network_to_pandas_hdf5(
tmpfile, network, nodes, edges_df, impedance_names, two_way):
ph5.network_to_pandas_hdf5(network, tmpfile)
store = pd.HDFStore(tmpfile)
pdt.assert_frame_equal(store['nodes'], nodes)
pdt.assert_frame_equal(store['edges'], edges_df)
pdt.assert_series_equal(store['two_way'], pd.Series([two_way]))
pdt.assert_series_equal(
store['impedance_names'], pd.Series(impedance_names))
@skipiftravis
def test_network_to_pandas_hdf5_removal(
tmpfile, network, impedance_names, two_way, rm_nodes):
nodes, edges = ph5.remove_nodes(network, rm_nodes)
ph5.network_to_pandas_hdf5(network, tmpfile, rm_nodes)
store = pd.HDFStore(tmpfile)
pdt.assert_frame_equal(store['nodes'], nodes)
pdt.assert_frame_equal(store['edges'], edges)
pdt.assert_series_equal(store['two_way'], pd.Series([two_way]))
pdt.assert_series_equal(
store['impedance_names'], pd.Series(impedance_names))
@skipiftravis
def test_network_from_pandas_hdf5(
tmpfile, network, nodes, edges_df, impedance_names, two_way):
ph5.network_to_pandas_hdf5(network, tmpfile)
new_net = ph5.network_from_pandas_hdf5(Network, tmpfile)
pdt.assert_frame_equal(new_net.nodes_df, nodes)
pdt.assert_frame_equal(new_net.edges_df, edges_df)
assert new_net._twoway == two_way
assert new_net.impedance_names == impedance_names
@skipiftravis
def test_network_save_load_hdf5(
tmpfile, network, impedance_names, two_way, rm_nodes):
network.save_hdf5(tmpfile, rm_nodes)
new_net = Network.from_hdf5(tmpfile)
nodes, edges = ph5.remove_nodes(network, rm_nodes)
pdt.assert_frame_equal(new_net.nodes_df, nodes)
pdt.assert_frame_equal(new_net.edges_df, edges)
assert new_net._twoway == two_way
assert new_net.impedance_names == impedance_names
# this is an odd place for this test because it's not related to HDF5,
# but my test Network is perfect.
@skipiftravis
def test_network_low_connectivity_nodes(network, impedance_names):
nodes = network.low_connectivity_nodes(10, 3, imp_name=impedance_names[0])
assert list(nodes) == [7, 11]
| agpl-3.0 |
uoryon/cluster-scheduler-simulator | src/main/python/graphing-scripts/comparison-plot-from-protobuff.py | 5 | 23735 | #!/usr/bin/python
# Copyright (c) 2013, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution. Neither the name of the University of California, Berkeley
# nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission. THIS
# SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This file generates a set of graphs for a simulator "experiment".
# An experiment is equivalent to the file generated from the run of a
# single Experiment object in the simulator (i.e. a parameter sweep for a
# set of workload_descs), with the added constraint that only one of
# C, L, or lambda can be varied per a single series (the simulator
# currently allows ranges to be provided for more than one of these).
import sys, os, re
from utils import *
import numpy as np
import matplotlib.pyplot as plt
import math
import operator
import logging
from collections import defaultdict
import cluster_simulation_protos_pb2
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
def usage():
print "usage: scheduler-business.py <output folder> <REMOVED: input_protobuff> " \
"<paper_mode: 0|1> <vary_dim: c|l|lambda> <env: any of A,B,C> [png]"
sys.exit(1)
# if len(sys.argv) < 6:
# logging.error("Not enough arguments provided.")
# usage()
paper_mode = True
output_formats = ['pdf']
# try:
# output_prefix = str(sys.argv[1])
# input_protobuff = sys.argv[2]
# if int(sys.argv[3]) == 1:
# paper_mode = True
# vary_dim = sys.argv[4]
# if vary_dim not in ['c', 'l', 'lambda']:
# logging.error("vary_dim must be c, l, or lambda!")
# sys.exit(1)
# envs_to_plot = sys.argv[5]
# if re.search("[^ABC]",envs_to_plot):
# logging.error("envs_to_plot must be any combination of a, b, and c, without spaces!")
# sys.exit(1)
# if len(sys.argv) == 7:
# if sys.argv[6] == "png":
# output_formats.append('png')
# else:
# logging.error("The only valid optional 5th argument is 'png'")
# sys.exit(1)
#
# except:
# usage()
#
# set_leg_fontsize(11)
# logging.info("Output prefix: %s" % output_prefix)
# logging.info("Input file: %s" % input_protobuff)
# google-omega-resfit-allornoth-single_path-vary_l-604800.protobuf
# google-omega-resfit-inc-single_path-vary_l-604800.protobuf
# google-omega-seqnum-allornoth-single_path-vary_l-604800.protobuf
# google-omega-seqnum-inc-single_path-vary_l-604800.protobuf
envs_to_plot = "C"
file_dir = '/Users/andyk/omega-7day-simulator-results/'
output_prefix = file_dir + "/graphs"
file_names = [("Fine/Gang", "google-omega-resfit-allornoth-single_path-vary_c-604800.protobuf"),
("Fine/Inc", "google-omega-resfit-inc-single_path-vary_c-604800.protobuf"),
("Coarse/Gang", "google-omega-seqnum-allornoth-single_path-vary_c-604800.protobuf"),
("Course/Inc", "google-omega-seqnum-inc-single_path-vary_c-604800.protobuf")]
experiment_result_sets = []
for title_name_tuple in file_names:
title = title_name_tuple[0]
file_name = title_name_tuple[1]
full_name = file_dir + file_name
# Read in the ExperimentResultSet.
#experiment_result_sets.append((title, cluster_simulation_protos_pb2.ExperimentResultSet()))
res_set = cluster_simulation_protos_pb2.ExperimentResultSet()
experiment_result_sets.append([title, res_set])
#titles[experiment_result_sets[-1]] = title
f = open(full_name, "rb")
res_set.ParseFromString(f.read())
f.close()
# ---------------------------------------
# Set up some general graphing variables.
if paper_mode:
set_paper_rcs()
fig = plt.figure(figsize=(2,1.33))
else:
fig = plt.figure()
prefilled_colors_web = { 'A': 'b', 'B': 'r', 'C': 'c', "synth": 'y' }
colors_web = { 'A': 'b', 'B': 'r', 'C': 'm', "synth": 'y' }
colors_paper = { 'A': 'b', 'B': 'r', 'C': 'c', "synth": 'b' }
per_wl_colors = { 'OmegaBatch': 'b',
'OmegaService': 'r' }
title_colors_web = { "Fine/Gang": 'b', "Fine/Inc": 'r', "Coarse/Gang": 'm', "Coarse/Inc": 'c' }
prefilled_linestyles_web = { 'Monolithic': 'D-',
'MonolithicApprox': 's-',
'MesosBatch': 'D-',
'MesosService': 'D:',
'MesosBatchApprox': 's-',
'MesosServiceApprox': 's:',
'OmegaBatch': 'D-',
'OmegaService': 'D:',
'OmegaBatchApprox': 's-',
'OmegaServiceApprox': 's:',
'Batch': 'D-',
'Service': 'D:' }
linestyles_web = { 'Monolithic': 'x-',
'MonolithicApprox': 'o-',
'MesosBatch': 'x-',
'MesosService': 'x:',
'MesosBatchApprox': 'o-',
'MesosServiceApprox': 'o:',
'OmegaBatch': 'x-',
'OmegaService': 'x:',
'OmegaBatchApprox': 'o-',
'OmegaServiceApprox': 'o:',
'Batch': 'x-',
'Service': 'x:' }
linestyles_paper = { 'Monolithic': '-',
'MonolithicApprox': '--',
'MesosBatch': '-',
'MesosService': ':',
'MesosBatchApprox': '--',
'MesosServiceApprox': '-.',
'OmegaBatch': '-',
'OmegaService': ':',
'OmegaBatchApprox': '--',
'OmegaServiceApprox': '-.',
'Batch': '-',
'Service': ':' }
dashes_paper = { 'Monolithic': (None,None),
'MonolithicApprox': (3,3),
'MesosBatch': (None,None),
'MesosService': (1,1),
'MesosBatchApprox': (3,3),
'MesosServiceApprox': (4,2),
'OmegaBatch': (None,None),
'OmegaService': (1,1),
'OmegaBatchApprox': (3,3),
'OmegaServiceApprox': (4,2),
'Batch': (None,None),
'Service': (1,1),
'Fine/Gang': (1,1),
'Fine/Inc': (3,3),
'Coarse/Gang': (4,2)
}
# Some dictionaries whose values will be dictionaries
# to make 2d dictionaries, which will be indexed by both exp_env
# and either workoad or scheduler name.
# --
# (cellName, assignmentPolicy, workload_name) -> array of data points
# for the parameter sweep done in the experiment.
workload_queue_time_till_first = {}
workload_queue_time_till_fully = {}
workload_queue_time_till_first_90_ptile = {}
workload_queue_time_till_fully_90_ptile = {}
workload_num_jobs_unscheduled = {}
# (cellName, assignmentPolicy, scheduler_name) -> array of data points
# for the parameter sweep done in the experiment.
sched_total_busy_fraction = {}
sched_daily_busy_fraction = {}
sched_daily_busy_fraction_err = {}
# TODO(andyk): Graph retry_busy_fraction on same graph as total_busy_fraction
# to parallel Malte's graphs.
# sched_retry_busy_fraction = {}
sched_conflict_fraction = {}
sched_daily_conflict_fraction = {}
sched_daily_conflict_fraction_err = {}
sched_task_conflict_fraction = {}
sched_num_retried_transactions = {}
sched_num_jobs_remaining = {}
sched_failed_find_victim_attempts = {}
# Convenience wrapper to override __str__()
class ExperimentEnv:
def __init__(self, init_exp_env):
self.exp_env = init_exp_env
self.cell_name = init_exp_env.cell_name
self.workload_split_type = init_exp_env.workload_split_type
self.is_prefilled = init_exp_env.is_prefilled
self.run_time = init_exp_env.run_time
def __str__(self):
return str("%s, %s" % (self.exp_env.cell_name, self.exp_env.workload_split_type))
# Figure out if we are varying c, l, or lambda in this experiment.
def vary_dim(self):
env = self.exp_env # Make a convenient short handle.
assert(len(env.experiment_result) > 1)
if (env.experiment_result[0].constant_think_time !=
env.experiment_result[1].constant_think_time):
vary_dim = "c"
# logging.debug("Varying %s. The first two experiments' c values were %d, %d "
# % (vary_dim,
# env.experiment_result[0].constant_think_time,
# env.experiment_result[1].constant_think_time))
elif (env.experiment_result[0].per_task_think_time !=
env.experiment_result[1].per_task_think_time):
vary_dim = "l"
# logging.debug("Varying %s. The first two experiments' l values were %d, %d "
# % (vary_dim,
# env.experiment_result[0].per_task_think_time,
# env.experiment_result[1].per_task_think_time))
else:
vary_dim = "lambda"
# logging.debug("Varying %s." % vary_dim)
return vary_dim
class Value:
def __init__(self, init_x, init_y):
self.x = init_x
self.y = init_y
def __str__(self):
return str("%f, %f" % (self.x, self.y))
def bt_approx(cell_name, sched_name, point, vary_dim_, tt_c, tt_l, runtime):
logging.debug("sched_name is %s " % sched_name)
assert(sched_name == "Batch" or sched_name == "Service")
lbd = {}
n = {}
# This function calculates an approximated scheduler busyness line given
# an average inter-arrival time and job size for each scheduler
# XXX: configure the below parameters and comment out the following
# line in order to
# 1) disable the warning, and
# 2) get a correct no-conflict approximation.
print >> sys.stderr, "*********************************************\n" \
"WARNING: YOU HAVE NOT CONFIGURED THE PARAMETERS IN THE bt_approx\n" \
"*********************************************\n"
################################
# XXX EDIT BELOW HERE
# hard-coded SAMPLE params for cluster A
lbd['A'] = { "Batch": 0.1, "Service": 0.01 } # lambdas for 0: serv & 1: Batch
n['A'] = { "Batch": 10.0, "Service": 5.0 } # avg num tasks per job
# hard-coded SAMPLE params for cluster B
lbd['B'] = { "Batch": 0.1, "Service": 0.01 }
n['B'] = { "Batch": 10.0, "Service": 5.0 }
# hard-coded SAMPLE params for cluster C
lbd['C'] = { "Batch": 0.1, "Service": 0.01 }
n['C'] = { "Batch": 10.0, "Service": 5.0 }
################################
# approximation formula
if vary_dim_ == 'c':
# busy_time = num_jobs * (per_job_think_time = C + nL) / runtime
return runtime * lbd[cell_name][sched_name] * \
((point + n[cell_name][sched_name] * float(tt_l))) / runtime
elif vary_dim_ == 'l':
return runtime * lbd[cell_name][sched_name] * \
((float(tt_c) + n[cell_name][sched_name] * point)) / runtime
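# Worked example of the approximation above (purely illustrative, using the
# SAMPLE Batch parameters for cluster 'A'): with lambda = 0.1 jobs/s, n = 10
# tasks/job, per-task time L = 0.005 s and constant time C = 1 s, the
# no-conflict busy fraction is lambda * (C + n * L) = 0.1 * 1.05 = 0.105.
# Note that the runtime factors in the expressions above cancel out.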
def get_mad(median, data):
#print "in get_mad, with median %f, data: %s" % (median, " ".join([str(i) for i in data]))
devs = [abs(x - median) for x in data]
mad = np.median(devs)
#print "returning mad = %f" % mad
return mad
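# For example (illustration): get_mad(np.median([1, 2, 9]), [1, 2, 9]) computes
# the absolute deviations [1, 0, 7] and returns their median, 1.0.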
def sort_labels(handles, labels):
hl = sorted(zip(handles, labels),
key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
return (handles2, labels2)
for experiment_result_set_arry in experiment_result_sets:
title = experiment_result_set_arry[0]
logging.debug("\n\n==========================\nHandling title %s." % title)
experiment_result_set = experiment_result_set_arry[1]
# Loop through each experiment environment.
logging.debug("Processing %d experiment envs."
% len(experiment_result_set.experiment_env))
for env in experiment_result_set.experiment_env:
if not re.search(cell_to_anon(env.cell_name), envs_to_plot):
logging.debug(" skipping env/cell " + env.cell_name)
continue
logging.debug("\n\n\n env: " + env.cell_name)
exp_env = ExperimentEnv(env) # Wrap the protobuff object to get __str__()
logging.debug(" Handling experiment env %s." % exp_env)
# Within this environment, loop through each experiment result
logging.debug(" Processing %d experiment results." % len(env.experiment_result))
for exp_result in env.experiment_result:
logging.debug(" Handling experiment with per_task_think_time %f, constant_think_time %f"
% (exp_result.per_task_think_time, exp_result.constant_think_time))
# Record the correct x val depending on which dimension is being
# swept over in this experiment.
      vary_dim = exp_env.vary_dim() # This line is unnecessary since this value
# is a flag passed as an arg to the script.
if vary_dim == "c":
x_val = exp_result.constant_think_time
elif vary_dim == "l":
x_val = exp_result.per_task_think_time
else:
x_val = exp_result.avg_job_interarrival_time
# logging.debug("Set x_val to %f." % x_val)
# Build results dictionaries of per-scheduler stats.
for sched_stat in exp_result.scheduler_stats:
# Per day busy time and conflict fractions.
daily_busy_fractions = []
daily_conflict_fractions = []
daily_conflicts = [] # counts the mean of daily abs # of conflicts.
daily_successes = []
logging.debug(" handling scheduler %s" % sched_stat.scheduler_name)
for day_stats in sched_stat.per_day_stats:
# Calculate the total busy time for each of the days and then
          # take the median of all of them.
run_time_for_day = exp_env.run_time - 86400 * day_stats.day_num
# logging.debug("setting run_time_for_day = exp_env.run_time - 86400 * "
# "day_stats.day_num = %f - 86400 * %d = %f"
# % (exp_env.run_time, day_stats.day_num, run_time_for_day))
if run_time_for_day > 0.0:
daily_busy_fractions.append(((day_stats.useful_busy_time +
day_stats.wasted_busy_time) /
min(86400.0, run_time_for_day)))
if day_stats.num_successful_transactions > 0:
conflict_fraction = (float(day_stats.num_failed_transactions) /
float(day_stats.num_successful_transactions))
daily_conflict_fractions.append(conflict_fraction)
daily_conflicts.append(float(day_stats.num_failed_transactions))
daily_successes.append(float(day_stats.num_successful_transactions))
# logging.debug("appending daily_conflict_fraction %f / %f = %f."
# % (float(day_stats.num_failed_transactions),
# float(day_stats.num_successful_transactions),
# conflict_fraction))
else:
daily_conflict_fractions.append(0)
# Daily busy time median.
daily_busy_time_med = np.median(daily_busy_fractions)
logging.debug(" Daily_busy_fractions, med: %f, vals: %s"
% (daily_busy_time_med,
" ".join([str(i) for i in daily_busy_fractions])))
value = Value(x_val, daily_busy_time_med)
append_or_create_2d(sched_daily_busy_fraction,
title,
sched_stat.scheduler_name,
value)
#logging.debug("sched_daily_busy_fraction[%s %s].append(%s)."
# % (exp_env, sched_stat.scheduler_name, value))
# Error Bar (MAD) for daily busy time.
value = Value(x_val, get_mad(daily_busy_time_med,
daily_busy_fractions))
append_or_create_2d(sched_daily_busy_fraction_err,
title,
sched_stat.scheduler_name,
value)
#logging.debug("sched_daily_busy_fraction_err[%s %s].append(%s)."
# % (exp_env, sched_stat.scheduler_name, value))
# Daily conflict fraction median.
daily_conflict_fraction_med = np.median(daily_conflict_fractions)
logging.debug(" Daily_abs_num_conflicts, med: %f, vals: %s"
% (np.median(daily_conflicts),
" ".join([str(i) for i in daily_conflicts])))
logging.debug(" Daily_num_successful_conflicts, med: %f, vals: %s"
% (np.median(daily_successes),
" ".join([str(i) for i in daily_successes])))
logging.debug(" Daily_conflict_fractions, med : %f, vals: %s\n --"
% (daily_conflict_fraction_med,
" ".join([str(i) for i in daily_conflict_fractions])))
value = Value(x_val, daily_conflict_fraction_med)
append_or_create_2d(sched_daily_conflict_fraction,
title,
sched_stat.scheduler_name,
value)
# logging.debug("sched_daily_conflict_fraction[%s %s].append(%s)."
# % (exp_env, sched_stat.scheduler_name, value))
# Error Bar (MAD) for daily conflict fraction.
value = Value(x_val, get_mad(daily_conflict_fraction_med,
daily_conflict_fractions))
append_or_create_2d(sched_daily_conflict_fraction_err,
title,
sched_stat.scheduler_name,
value)
def plot_2d_data_set_dict(data_set_2d_dict,
plot_title,
filename_suffix,
y_label,
y_axis_type,
error_bars_data_set_2d_dict = None):
assert(y_axis_type == "0-to-1" or
y_axis_type == "ms-to-day" or
y_axis_type == "abs")
plt.clf()
ax = fig.add_subplot(111)
for title, name_to_val_map in data_set_2d_dict.iteritems():
for wl_or_sched_name, values in name_to_val_map.iteritems():
line_label = title
# Hacky: chop MonolithicBatch, MesosBatch, MonolithicService, etc.
# down to "Batch" and "Service" if in paper mode.
updated_wl_or_sched_name = wl_or_sched_name
if paper_mode and re.search("Batch", wl_or_sched_name):
updated_wl_or_sched_name = "Batch"
if paper_mode and re.search("Service", wl_or_sched_name):
updated_wl_or_sched_name = "Service"
      # Only plot lines for Service schedulers; skip Batch schedulers.
      if updated_wl_or_sched_name == "Batch":
        logging.debug("Skipping line for batch scheduler %s" % wl_or_sched_name)
continue
x_vals = [value.x for value in values]
# Rewrite zero's for the y_axis_types that will be log.
y_vals = [0.00001 if (value.y == 0 and y_axis_type == "ms-to-day")
else value.y for value in values]
logging.debug("Plotting line for %s %s %s." %
(title, updated_wl_or_sched_name, plot_title))
#logging.debug("x vals: " + " ".join([str(i) for i in x_vals]))
#logging.debug("y vals: " + " ".join([str(i) for i in y_vals]))
logging.debug("wl_or_sched_name: " + wl_or_sched_name)
logging.debug("title: " + title)
ax.plot(x_vals, y_vals,
dashes=dashes_paper[wl_or_sched_name],
color=title_colors_web[title],
label=line_label, markersize=4,
mec=title_colors_web[title])
setup_graph_details(ax, plot_title, filename_suffix, y_label, y_axis_type)
def setup_graph_details(ax, plot_title, filename_suffix, y_label, y_axis_type):
assert(y_axis_type == "0-to-1" or
y_axis_type == "ms-to-day" or
y_axis_type == "abs")
# Paper title.
if not paper_mode:
plt.title(plot_title)
if paper_mode:
try:
# Set up the legend, for removing the border if in paper mode.
handles, labels = ax.get_legend_handles_labels()
handles2, labels2 = sort_labels(handles, labels)
leg = plt.legend(handles2, labels2, loc=2, labelspacing=0)
fr = leg.get_frame()
fr.set_linewidth(0)
except:
print "Failed to remove frame around legend, legend probably is empty."
# Axis labels.
if not paper_mode:
ax.set_ylabel(y_label)
if vary_dim == "c":
ax.set_xlabel(u'Scheduler 1 constant processing time [sec]')
elif vary_dim == "l":
ax.set_xlabel(u'Scheduler 1 per-task processing time [sec]')
elif vary_dim == "lambda":
ax.set_xlabel(u'Job arrival rate to scheduler 1, $\lambda_1$')
# x-axis scale, limit, tics and tic labels.
ax.set_xscale('log')
ax.set_autoscalex_on(False)
if vary_dim == 'c':
plt.xlim(xmin=0.01)
plt.xticks((0.01, 0.1, 1, 10, 100), ('10ms', '0.1s', '1s', '10s', '100s'))
elif vary_dim == 'l':
plt.xlim(xmin=0.001, xmax=1)
plt.xticks((0.001, 0.01, 0.1, 1), ('1ms', '10ms', '0.1s', '1s'))
elif vary_dim == 'lambda':
plt.xlim([0.1, 100])
plt.xticks((0.1, 1, 10, 100), ('0.1s', '1s', '10s', '100s'))
# y-axis limit, tics and tic labels.
if y_axis_type == "0-to-1":
logging.debug("Setting up y-axis for '0-to-1' style graph.")
plt.ylim([0, 1])
plt.yticks((0, 0.2, 0.4, 0.6, 0.8, 1.0),
('0.0', '0.2', '0.4', '0.6', '0.8', '1.0'))
elif y_axis_type == "ms-to-day":
logging.debug("Setting up y-axis for 'ms-to-day' style graph.")
#ax.set_yscale('symlog', linthreshy=0.001)
ax.set_yscale('log')
plt.ylim(ymin=0.01, ymax=24*3600)
plt.yticks((0.01, 1, 60, 3600, 24*3600), ('10ms', '1s', '1m', '1h', '1d'))
elif y_axis_type == "abs":
plt.ylim(ymin=0)
logging.debug("Setting up y-axis for 'abs' style graph.")
#plt.yticks((0.01, 1, 60, 3600, 24*3600), ('10ms', '1s', '1m', '1h', '1d'))
else:
    logging.error('y_axis_type parameter must be either "0-to-1"'
', "ms-to-day", or "abs".')
sys.exit(1)
final_filename = (output_prefix +
('/sisi-vary-%s-vs-' % vary_dim) +
filename_suffix)
logging.debug("Writing plot to %s", final_filename)
writeout(final_filename, output_formats)
#SCHEDULER DAILY BUSY AND CONFLICT FRACTION MEDIANS
plot_2d_data_set_dict(sched_daily_busy_fraction,
"Scheduler processing time vs. median(daily busy time fraction)",
"daily-busy-fraction-med",
u'Median(daily busy time fraction)',
"0-to-1")
plot_2d_data_set_dict(sched_daily_conflict_fraction,
"Scheduler processing time vs. median(daily conflict fraction)",
"daily-conflict-fraction-med",
u'Median(daily conflict fraction)',
"0-to-1")
| bsd-3-clause |
elingg/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/in_memory_source_test.py | 18 | 4185 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests NumpySource and PandasSource."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class NumpySourceTestCase(test.TestCase):
def testNumpySource(self):
batch_size = 3
iterations = 1000
array = np.arange(32).reshape([16, 2])
numpy_source = in_memory_source.NumpySource(array, batch_size=batch_size)
index_column = numpy_source().index
value_column = numpy_source().value
cache = {}
with ops.Graph().as_default():
value_tensor = value_column.build(cache)
index_tensor = index_column.build(cache)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
expected_index = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_value = get_rows(array, expected_index)
actual_index, actual_value = sess.run([index_tensor, value_tensor])
np.testing.assert_array_equal(expected_index, actual_index)
np.testing.assert_array_equal(expected_value, actual_value)
coord.request_stop()
coord.join(threads)
class PandasSourceTestCase(test.TestCase):
def testPandasFeeding(self):
if not HAS_PANDAS:
return
batch_size = 3
iterations = 1000
index = np.arange(100, 132)
a = np.arange(32)
b = np.arange(32, 64)
dataframe = pd.DataFrame({"a": a, "b": b}, index=index)
pandas_source = in_memory_source.PandasSource(
dataframe, batch_size=batch_size)
pandas_columns = pandas_source()
cache = {}
with ops.Graph().as_default():
pandas_tensors = [col.build(cache) for col in pandas_columns]
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
indices = [
j % dataframe.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = dataframe.index[indices]
expected_rows = dataframe.iloc[indices]
actual_value = sess.run(pandas_tensors)
np.testing.assert_array_equal(expected_df_indices, actual_value[0])
for col_num, col in enumerate(dataframe.columns):
np.testing.assert_array_equal(expected_rows[col].values,
actual_value[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
SpatialMetabolomics/SM_distributed | tests/test_es_exporter.py | 2 | 10310 | import logging
from datetime import datetime
from unittest.mock import MagicMock, patch
import pandas as pd
import time
from sm.engine import MolecularDB
from sm.engine.es_export import ESExporter, ESIndexManager, DATASET_SEL, ANNOTATIONS_SEL
from sm.engine import DB
from sm.engine.ion_centroids_gen import IonCentroidsGenerator
from sm.engine.isocalc_wrapper import IsocalcWrapper
from sm.engine.tests.util import sm_config, ds_config, sm_index, es, es_dsl_search, test_db
def wait_for_es(sec=1):
time.sleep(sec)
def test_index_ds_works(es_dsl_search, sm_index, sm_config):
ds_id = '2000-01-01_00h00m'
upload_dt = datetime.now().isoformat(' ')
mol_db_id = 0
last_finished = '2017-01-01T00:00:00'
def db_sel_side_effect(sql, params):
if sql == DATASET_SEL:
return [{
'ds_id': ds_id,
'ds_name': 'ds_name',
'ds_input_path': 'ds_input_path',
'ds_config': 'ds_config',
'ds_meta': {},
'ds_upload_dt': upload_dt,
'ds_status': 'ds_status',
'ds_last_finished': datetime.strptime(last_finished, '%Y-%m-%dT%H:%M:%S'),
'ds_is_public': True,
'ds_ion_img_storage': 'fs',
'ds_acq_geometry': {}
}]
elif sql == ANNOTATIONS_SEL:
return [{
'sf': 'H2O',
'sf_adduct': 'H2O+H',
'chaos': 1,
'image_corr': 1,
'pattern_match': 1,
'total_iso_ints': 100,
'min_iso_ints': 0,
'max_iso_ints': 100,
'msm': 1,
'adduct': '+H',
'job_id': 1,
'fdr': 0.1,
'iso_image_ids': ['iso_img_id_1', 'iso_img_id_2'],
'polarity': '+'
}, {
'sf': 'Au',
'sf_adduct': 'Au+H',
'chaos': 1,
'image_corr': 1,
'pattern_match': 1,
'total_iso_ints': 100,
'min_iso_ints': 0,
'max_iso_ints': 100,
'msm': 1,
'adduct': '+H',
'job_id': 1,
'fdr': 0.05,
'iso_image_ids': ['iso_img_id_1', 'iso_img_id_2'],
'polarity': '+'
}]
else:
            logging.getLogger('engine').error('Unexpected db_sel_side_effect arguments: %s, %s', sql, params)
db_mock = MagicMock(spec=DB)
db_mock.select_with_fields.side_effect = db_sel_side_effect
mol_db_mock = MagicMock(MolecularDB)
mol_db_mock.id = mol_db_id
mol_db_mock.name = 'db_name'
mol_db_mock.version = '2017'
mol_db_mock.get_molecules.return_value = pd.DataFrame([('H2O', 'mol_id', 'mol_name'), ('Au', 'mol_id', 'mol_name')],
columns=['sf', 'mol_id', 'mol_name'])
isocalc_mock = MagicMock(IsocalcWrapper)
isocalc_mock.ion_centroids = lambda sf, adduct: {
('H2O', '+H'): ([100., 200.], None),
('Au', '+H'): ([10., 20.], None)
}[(sf, adduct)]
es_exp = ESExporter(db_mock)
es_exp.delete_ds(ds_id)
es_exp.index_ds(ds_id=ds_id, mol_db=mol_db_mock, isocalc=isocalc_mock)
wait_for_es(sec=1)
ds_d = es_dsl_search.filter('term', _type='dataset').execute().to_dict()['hits']['hits'][0]['_source']
assert ds_d == {
'ds_last_finished': last_finished, 'ds_config': 'ds_config', 'ds_meta': {},
'ds_status': 'ds_status', 'ds_name': 'ds_name', 'ds_input_path': 'ds_input_path', 'ds_id': ds_id,
'ds_upload_dt': upload_dt,
'annotation_counts': [{'db': {'name': 'db_name', 'version': '2017'},
'counts': [{'level': 5, 'n': 1}, {'level': 10, 'n': 2},
{'level': 20, 'n': 2}, {'level': 50, 'n': 2}]}],
'ds_is_public': True,
'ds_acq_geometry': {},
'ds_ion_img_storage': 'fs'
}
ann_1_d = es_dsl_search.filter('term', sf='H2O').execute().to_dict()['hits']['hits'][0]['_source']
assert ann_1_d == {
'pattern_match': 1, 'image_corr': 1, 'fdr': 0.1, 'chaos': 1, 'sf': 'H2O', 'min_iso_ints': 0,
'msm': 1, 'sf_adduct': 'H2O+H', 'total_iso_ints': 100, 'centroid_mzs': [100., 200.],
'iso_image_ids': ['iso_img_id_1', 'iso_img_id_2'], 'polarity': '+', 'job_id': 1, 'max_iso_ints': 100,
'adduct': '+H', 'ds_name': 'ds_name', 'annotation_counts': [], 'db_version': '2017', 'ds_status': 'ds_status',
'ion_add_pol': '[M+H]+', 'comp_names': ['mol_name'], 'db_name': 'db_name', 'mz': 100., 'ds_meta': {},
'comp_ids': ['mol_id'], 'ds_config': 'ds_config', 'ds_input_path': 'ds_input_path', 'ds_id': ds_id,
'ds_upload_dt': upload_dt, 'ds_last_finished': last_finished,
'ds_ion_img_storage': 'fs', 'ds_is_public': True
}
ann_2_d = es_dsl_search.filter('term', sf='Au').execute().to_dict()['hits']['hits'][0]['_source']
assert ann_2_d == {
'pattern_match': 1, 'image_corr': 1, 'fdr': 0.05, 'chaos': 1, 'sf': 'Au', 'min_iso_ints': 0,
'msm': 1, 'sf_adduct': 'Au+H', 'total_iso_ints': 100, 'centroid_mzs': [10., 20.],
'iso_image_ids': ['iso_img_id_1', 'iso_img_id_2'], 'polarity': '+', 'job_id': 1, 'max_iso_ints': 100,
'adduct': '+H', 'ds_name': 'ds_name', 'annotation_counts': [], 'db_version': '2017', 'ds_status': 'ds_status',
'ion_add_pol': '[M+H]+', 'comp_names': ['mol_name'], 'db_name': 'db_name', 'mz': 10., 'ds_meta': {},
'comp_ids': ['mol_id'], 'ds_config': 'ds_config', 'ds_input_path': 'ds_input_path', 'ds_id': ds_id,
'ds_upload_dt': upload_dt, 'ds_last_finished': last_finished,
'ds_ion_img_storage': 'fs', 'ds_is_public': True
}
def test_delete_ds__one_db_ann_only(es, sm_index, sm_config):
index = sm_config['elasticsearch']['index']
es.create(index=index, doc_type='annotation', id='id1',
body={'ds_id': 'dataset1', 'db_name': 'HMDB', 'db_version': '2016'})
es.create(index=index, doc_type='annotation', id='id2',
body={'ds_id': 'dataset1', 'db_name': 'ChEBI', 'db_version': '2016'})
es.create(index=index, doc_type='annotation', id='id3',
body={'ds_id': 'dataset2', 'db_name': 'HMDB', 'db_version': '2016'})
es.create(index=index, doc_type='dataset', id='id4',
body={'ds_id': 'dataset1', 'db_name': 'HMDB', 'db_version': '2016'})
wait_for_es(sec=1)
db_mock = MagicMock(spec=DB)
moldb_mock = MagicMock(spec=MolecularDB)
moldb_mock.name = 'HMDB'
moldb_mock.version = '2016'
es_exporter = ESExporter(db_mock)
es_exporter.delete_ds(ds_id='dataset1', mol_db=moldb_mock)
wait_for_es(sec=1)
body = {
'query': {
'bool': {
'filter': []
}
}
}
body['query']['bool']['filter'] = [{'term': {'ds_id': 'dataset1'}}, {'term': {'db_name': 'HMDB'}}]
assert es.count(index=index, doc_type='annotation', body=body)['count'] == 0
body['query']['bool']['filter'] = [{'term': {'ds_id': 'dataset1'}}, {'term': {'db_name': 'ChEBI'}}]
assert es.count(index=index, doc_type='annotation', body=body)['count'] == 1
body['query']['bool']['filter'] = [{'term': {'ds_id': 'dataset2'}}, {'term': {'db_name': 'HMDB'}}]
assert es.count(index=index, doc_type='annotation', body=body)['count'] == 1
body['query']['bool']['filter'] = [{'term': {'ds_id': 'dataset1'}}, {'term': {'_type': 'dataset'}}]
assert es.count(index=index, doc_type='dataset', body=body)['count'] == 1
def test_delete_ds__completely(es, sm_index, sm_config):
index = sm_config['elasticsearch']['index']
es.create(index=index, doc_type='annotation', id='id1',
body={'ds_id': 'dataset1', 'db_name': 'HMDB', 'db_version': '2016'})
es.create(index=index, doc_type='annotation', id='id2',
body={'ds_id': 'dataset1', 'db_name': 'ChEBI', 'db_version': '2016'})
es.create(index=index, doc_type='annotation', id='id3',
body={'ds_id': 'dataset2', 'db_name': 'HMDB', 'db_version': '2016'})
es.create(index=index, doc_type='dataset', id='dataset1',
body={'ds_id': 'dataset1', 'db_name': 'HMDB', 'db_version': '2016'})
wait_for_es(sec=1)
db_mock = MagicMock(spec=DB)
es_exporter = ESExporter(db_mock)
es_exporter.delete_ds(ds_id='dataset1')
wait_for_es(sec=1)
body = {
'query': {
'bool': {
'filter': []
}
}
}
body['query']['bool']['filter'] = [{'term': {'ds_id': 'dataset1'}}, {'term': {'db_name': 'HMDB'}}]
assert es.count(index=index, doc_type='annotation', body=body)['count'] == 0
body['query']['bool']['filter'] = [{'term': {'ds_id': 'dataset1'}}, {'term': {'db_name': 'ChEBI'}}]
assert es.count(index=index, doc_type='annotation', body=body)['count'] == 0
body['query']['bool']['filter'] = [{'term': {'ds_id': 'dataset2'}}, {'term': {'db_name': 'HMDB'}}]
assert es.count(index=index, doc_type='annotation', body=body)['count'] == 1
body['query']['bool']['filter'] = [{'term': {'ds_id': 'dataset1'}}, {'term': {'_type': 'dataset'}}]
assert es.count(index=index, doc_type='dataset', body=body)['count'] == 0
def test_rename_index_works(test_db, sm_config):
es_config = sm_config['elasticsearch']
alias = es_config['index']
es_man = ESIndexManager(es_config)
es_man.create_index('{}-yin'.format(alias))
es_man.remap_alias('{}-yin'.format(alias), alias=alias)
assert es_man.exists_index(alias)
assert es_man.exists_index('{}-yin'.format(alias))
assert not es_man.exists_index('{}-yang'.format(alias))
es_man.create_index('{}-yang'.format(alias))
es_man.remap_alias('{}-yang'.format(alias), alias=alias)
assert es_man.exists_index(alias)
assert es_man.exists_index('{}-yang'.format(alias))
assert not es_man.exists_index('{}-yin'.format(alias))
def test_internal_index_name_return_valid_values(sm_config):
es_config = sm_config['elasticsearch']
alias = es_config['index']
es_man = ESIndexManager(es_config)
assert es_man.internal_index_name(alias) in ['{}-yin'.format(alias), '{}-yang'.format(alias)]
| apache-2.0 |
StochasticNumerics/mimclib | setup.py | 1 | 1622 | import os
from setuptools import setup, find_packages, Extension
from setuptools.command.install import install as cmd_install
class install(cmd_install, object):
    def run(self):
        super(install, self).run()
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
setup(
name="mimclib",
# 1st is increased if it is not compatible with previous version.
# 2nd is increased if new features are added.
# 3rd is increased for small fixes.
version="1.0.2.dev4",
author="Abdul-Lateef Haji-Ali",
author_email="[email protected]",
description="A library implementing the MIMC and CMLMC methods.",
license="BSD",
url="http://stochastic_numerics.kaust.edu.sa/",
packages=find_packages(),
long_description=read('README.md'),
install_requires=[],#['matplotlib>=1.5', 'numpy>=1.9', 'scipy>=0.17.0', 'dill'],
extras_require={'mysqldb': ["MySQL-python"]},
ext_modules=[
Extension('mimclib.libset_util',
['mimclib/libsetutil/src/set_util.cpp',
'mimclib/libsetutil/src/var_list.cpp',],
include_dirs=[''],
library_dirs=['/'],
libraries=[],
extra_compile_args=['-std=c++11'])],
cmdclass={'install': install},
)
# Need to install the package: libmysqlclient-dev
| gpl-2.0 |
ZENGXH/scikit-learn | sklearn/ensemble/forest.py | 176 | 62555 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.validation import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score fuction."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
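    # Illustration: with n_samples=5 and sample_indices=[0, 0, 3, 3, 4], the
    # bincount is [2, 0, 0, 2, 1]; samples 1 and 2 were never drawn, so they
    # become this tree's out-of-bag (unsampled) indices, [1, 2].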
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve data contiguity, which
            # [:, np.newaxis] would not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
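# ---------------------------------------------------------------------------
# Editor's note: the helper below is an illustrative sketch added for clarity
# and is not part of the original module. It assumes the public
# ``sklearn.ensemble`` API and uses the iris dataset purely as example data.
# It shows that the ``feature_importances_`` property defined above is simply
# the average of the per-tree importances.
def _example_feature_importances():
    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.ensemble import RandomForestClassifier

    data = load_iris()
    forest = RandomForestClassifier(n_estimators=50, random_state=0)
    forest.fit(data.data, data.target)

    # Averaging the per-tree importances by hand should match the property
    # (up to floating point error).
    manual = np.mean([tree.feature_importances_
                      for tree in forest.estimators_], axis=0)
    assert np.allclose(manual, forest.feature_importances_)
    return forest.feature_importances_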
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('auto', 'balanced', 'balanced_subsample', 'subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated and will be removed in 0.18."
" It was replaced by class_weight='balanced_subsample' "
"using the balanced strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
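# Editor's note: illustrative usage sketch (not original code). It demonstrates
# the relationship implemented above: for a single-output problem, ``predict``
# returns the class with the highest probability averaged over the trees by
# ``predict_proba``. The public sklearn API and the iris data are assumptions
# made only for this example.
def _example_predict_is_argmax_of_proba():
    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.ensemble import RandomForestClassifier

    data = load_iris()
    clf = RandomForestClassifier(n_estimators=25, random_state=0)
    clf.fit(data.data, data.target)

    proba = clf.predict_proba(data.data)        # shape (n_samples, n_classes)
    labels = clf.classes_.take(np.argmax(proba, axis=1), axis=0)
    assert np.array_equal(labels, clf.predict(data.data))
    return labels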
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean of the predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
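# Editor's note: minimal sketch (added for illustration, not original code)
# showing the out-of-bag machinery implemented in ``_set_oob_score`` above.
# The synthetic data and parameter values are arbitrary assumptions.
def _example_regressor_oob_score():
    import numpy as np
    from sklearn.ensemble import RandomForestRegressor

    rng = np.random.RandomState(0)
    X = rng.rand(200, 4)
    y = X[:, 0] + 0.1 * rng.randn(200)

    # OOB estimation requires bootstrap sampling of the training set.
    reg = RandomForestRegressor(n_estimators=100, bootstrap=True,
                                oob_score=True, random_state=0)
    reg.fit(X, y)
    # R^2 estimated only on the samples each tree did not see during fitting.
    return reg.oob_score_, reg.oob_prediction_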
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
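# Editor's note: hypothetical usage sketch for the classifier defined above
# (not part of the original module). The dataset, class imbalance and
# parameter values are assumptions chosen only to illustrate
# ``class_weight="balanced_subsample"``.
def _example_random_forest_classifier():
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier

    X, y = make_classification(n_samples=300, n_features=10,
                               weights=[0.9, 0.1], random_state=0)
    clf = RandomForestClassifier(n_estimators=50,
                                 class_weight="balanced_subsample",
                                 random_state=0)
    clf.fit(X, y)
    # Class probabilities are averaged over all trees in the forest.
    return clf.predict(X[:5]), clf.predict_proba(X[:5])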
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of decision tree
regressors on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
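# Editor's note: illustrative sketch of the ``warm_start`` behaviour handled in
# ``BaseForest.fit`` (added by the editor, not original code). Increasing
# ``n_estimators`` between calls to ``fit`` grows the existing forest instead
# of rebuilding it. Data and sizes are arbitrary assumptions.
def _example_warm_start_growth():
    import numpy as np
    from sklearn.ensemble import RandomForestRegressor

    rng = np.random.RandomState(0)
    X = rng.rand(100, 3)
    y = rng.rand(100)

    reg = RandomForestRegressor(n_estimators=10, warm_start=True,
                                random_state=0)
    reg.fit(X, y)
    assert len(reg.estimators_) == 10

    reg.set_params(n_estimators=20)   # ten additional trees on the next fit
    reg.fit(X, y)
    assert len(reg.estimators_) == 20
    return reg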
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
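# Editor's note: brief, hedged usage sketch (not part of the original module).
# Unlike ``RandomForestClassifier``, ``ExtraTreesClassifier`` defaults to
# ``bootstrap=False``; turning bootstrap on is required if an out-of-bag
# estimate is wanted, as enforced in ``BaseForest.fit`` above. The iris data
# and parameter values are assumptions for illustration.
def _example_extra_trees_classifier():
    from sklearn.datasets import load_iris
    from sklearn.ensemble import ExtraTreesClassifier

    data = load_iris()
    clf = ExtraTreesClassifier(n_estimators=50, bootstrap=True,
                               oob_score=True, random_state=0)
    clf.fit(data.data, data.target)
    return clf.oob_score_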
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
sparse_output : bool, optional (default=True)
Whether to return a sparse CSR matrix (the default behavior) or a
dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
# ensure_2d=False because there are actually unit tests checking that we
# fail for 1d.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices so that each individual tree of the
# ensemble does not have to sort them again.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
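# Editor's note: minimal usage sketch for ``RandomTreesEmbedding`` (added for
# illustration, not original code). Each sample is encoded by the leaf it
# reaches in every tree, one-hot encoded into a sparse matrix, so every row
# contains exactly ``n_estimators`` non-zero entries. Data and parameters are
# arbitrary assumptions.
def _example_random_trees_embedding():
    import numpy as np
    from sklearn.ensemble import RandomTreesEmbedding

    rng = np.random.RandomState(0)
    X = rng.rand(50, 2)

    embedder = RandomTreesEmbedding(n_estimators=10, max_depth=3,
                                    random_state=0)
    X_transformed = embedder.fit_transform(X)

    # One active leaf indicator per (sample, tree) pair.
    assert np.all(np.asarray(X_transformed.sum(axis=1)).ravel() == 10)
    return X_transformed.shape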
| bsd-3-clause |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/linear_model/plot_iris_logistic.py | 1 | 1677 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier
on the `iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset.
The datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# Create an instance of the logistic regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| mit |
larsmans/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 43 | 1791 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with a decision tree.
A :ref:`decision tree <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and fit the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
clf_1 = DecisionTreeRegressor(max_depth=2)
clf_2 = DecisionTreeRegressor(max_depth=5)
clf_3 = DecisionTreeRegressor(max_depth=8)
clf_1.fit(X, y)
clf_2.fit(X, y)
clf_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = clf_1.predict(X_test)
y_2 = clf_2.predict(X_test)
y_3 = clf_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
michigraber/scikit-learn | sklearn/tests/test_multiclass.py | 72 | 24581 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
with ignore_warnings():
assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
LabelBinarizer(), [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
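# Editor's note: illustrative helper added by the editor; it is not collected
# as a test (no ``test_`` prefix) and is not part of the original suite. It
# sketches the basic behaviour the surrounding tests exercise: one-vs-rest
# fits one binary classifier per class, one-vs-one fits one per class pair.
def _example_ovr_vs_ovo():
    ovr = OneVsRestClassifier(LinearSVC(random_state=0)).fit(iris.data,
                                                             iris.target)
    ovo = OneVsOneClassifier(LinearSVC(random_state=0)).fit(iris.data,
                                                            iris.target)
    assert len(ovr.estimators_) == n_classes
    assert len(ovo.estimators_) == n_classes * (n_classes - 1) // 2
    return ovr.predict(iris.data[:5]), ovo.predict(iris.data[:5])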
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
# Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
@ignore_warnings
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = [["spam", "eggs"], ["spam"], ["ham", "eggs", "spam"],
["ham", "eggs"], ["ham"]]
# y = [[1, 2], [1], [0, 1, 2], [0, 2], [0]]
Y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
# test input as lists of tuples
clf = assert_warns(DeprecationWarning,
OneVsRestClassifier(base_clf).fit,
X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_equal(set(y_pred), set(["spam", "eggs"]))
assert_true(clf.multilabel_)
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote
        # levels, because there are only 3 distinct class pairs and thus
        # 3 distinct binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
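        # Worked illustration (added for clarity, not from the original
        # test): with classes {0, 1, 2} the pairwise classifiers are
        # (0, 1), (0, 2) and (1, 2). A sample predicted as class 0 by the
        # first two and as class 1 by the third collects votes [2, 1, 0],
        # so no class can ever receive more than two votes here.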
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
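    # Hypothetical illustration of the decomposition above (not part of
    # the original test): if a decision row were [1.20, 0.90, 0.95], then
    # votes = round(...) = [1, 1, 1] (a three-way tie) and the normalized
    # confidences [0.20, -0.10, -0.05] break the tie in favour of class 0.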
def test_ovo_ties2():
    # test that ties can be won by labels other than just the first two
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
base_estimator = DecisionTreeClassifier(random_state=0)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
all_metas = [
(OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
(OneVsOneClassifier, fit_ovo, predict_ovo, None),
(OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
]
for MetaEst, fit_func, predict_func, proba_func in all_metas:
try:
meta_est = MetaEst(base_estimator,
random_state=0).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train,
random_state=0)
except TypeError:
meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train)
if len(fitted_return) == 2:
estimators_, classes_or_lb = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
X_test),
meta_est.predict(X_test))
if proba_func is not None:
assert_almost_equal(proba_func(estimators_, X_test,
is_multilabel=False),
meta_est.predict_proba(X_test))
else:
estimators_, classes_or_lb, codebook = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
codebook, X_test),
meta_est.predict(X_test))
| bsd-3-clause |
mtesseract/plots | WeierstrassElliptic/wp-plots.py | 1 | 6915 | # Copyright (C) 2014, 2015 Moritz Schulte <[email protected]>
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import cmath
import itertools
import math
from mpl_toolkits.mplot3d import proj3d
import matplotlib.animation as animation
# Draw a point, given its (x, y, z) coordinates.
def addPoint(x1, x2, x3, color):
ax.scatter([x1], [x2], zs=[x3], c=color, s=[100], depthshade=False)
# Draw a complex number, using stereographic projection.
def addNumberColored(z, color):
if cmath.isinf (z):
addPoint(0, 0, 1, color)
else:
(x1, x2, x3) = stePro (z.real, z.imag)
addPoint(x1, x2, x3, color)
# Stereographic projection: given x and y, compute the coordinates
# (x1, x2, x3) on the unit sphere embedded in $\mathbb{R}^3$.
def stePro(x,y):
x1 = 2*x / (1 + x**2 + y**2)
x2 = 2*y / (1 + x**2 + y**2)
x3 = (x**2 + y**2 - 1) / (1 + x**2 + y**2)
return (x1, x2, x3)
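# Quick sanity check on stePro (added note, not in the original script):
# stePro(0, 0) == (0.0, 0.0, -1.0), i.e. the origin of the plane maps to
# the south pole, while |z| -> infinity approaches the north pole
# (0, 0, 1) -- consistent with addNumberColored sending inf to (0, 0, 1).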
# This draws the complex projective space $\mathbb{P}_1$ as a sphere.
def drawRiemannSphere():
# Begin with polar coordinates...
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
# ... and convert them to cartesian coordinates.
x = 1 * np.outer(np.cos(u), np.sin(v))
y = 1 * np.outer(np.sin(u), np.sin(v))
z = 1 * np.outer(np.ones(np.size(u)), np.cos(v))
ax.plot_surface(x, y, z, rstride=4, cstride=4, color='b', alpha=0.5, linewidth=0, cmap='cool')
addNumberColored(float("inf"), 'g')
addNumberColored(0, 'y')
################################################
# Implementation of the Weierstrass P function #
################################################
# These are the lattice points which we use for approximating the Weierstrass elliptic functions.
# First in cartesian coordinates...
latticepointsXY = list(filter(lambda x: x != (0,0), itertools.product(range(-10,11),range(-10,11))))
# ... then in the complex plane.
latticepoints = list(map (lambda x: x[0] + x[1]*1j, latticepointsXY))
# $w = \wp(z)$
def wp(z):
if z == 0 or z in latticepoints:
return float("inf") + float("inf") * 1j
else:
w = 1/(z**2) + sum(map (lambda a: 1/(z-a)**2 - 1/a**2, latticepoints))
return w
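# The sum above is a truncated version of the Weierstrass series
#   $\wp(z) = 1/z^2 + \sum_{\omega \in \Lambda \setminus \{0\}}
#             \big( 1/(z-\omega)^2 - 1/\omega^2 \big)$,
# restricted here to lattice points with coordinates in [-10, 10].
# Note that wp(0) and wp(1 + 0j) (a lattice point) both return complex
# infinity via the early-return branch.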
# Compute the stereographic projection of $\wp(x + t i)$.
def wpPlot(t, x):
z = x + t*1j
w = wp (z)
return stePro (w.real, w.imag)
#################################################
# Implementation of the Weierstrass P' function #
#################################################
# $w = \wp'(z)$
def wpP(z):
if z == 0 or z in latticepoints:
return float("inf") + float("inf") * 1j
else:
w = -2/(z**3) - 2 * sum(map (lambda a: 1/(z-a)**3, latticepoints))
return w
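# Likewise a truncated version of
#   $\wp'(z) = -2 \sum_{\omega \in \Lambda} 1/(z-\omega)^3$
# (the $\omega = 0$ term is the leading $-2/z^3$), taken over the same
# finite set of lattice points.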
# Compute the stereographic projection of $\wp'(x + t i)$.
def wpPPlot(t, x):
z = x + t*1j
w = wpP (z)
return stePro (w.real, w.imag)
# [[x0, y0, z0], [x1, y1, z1], ...] => ([x0, x1, ...], [y0, y1, ...], [z0, z1, ...])
def extractComponents(tuples):
xs = []
ys = []
zs = []
while True:
if tuples == []:
return (xs, ys, zs)
else:
t = tuples[0]
xs.append (t[0])
ys.append (t[1])
zs.append (t[2])
tuples = tuples[1:]
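# For example (added for clarity):
#   extractComponents([(1, 2, 3), (4, 5, 6)]) == ([1, 4], [2, 5], [3, 6])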
def takeUntil(l,x):
lRes = []
while l != []:
lRes.append(l[0])
if l[0] == x:
break
else:
l = l[1:]
return lRes
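# Note (added): takeUntil above returns the prefix of l up to and
# including the first occurrence of x, or all of l if x never occurs,
# e.g. takeUntil([0.0, 0.1, 0.2, 0.3], 0.2) == [0.0, 0.1, 0.2].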
# We use this for bookkeeping.
lastpaths = []
# Initialize drawing canvas.
fig = plt.figure(frameon=True, figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d(-1, 1)
ax.set_ylim3d(-1, 1)
ax.set_zlim3d(-1, 1)
# Plot a single "slice" of the Weierstrass P function, i.e., on the
# standard square torus, an interval of the form
# $I_{y(t)} = \{(x, y(t)) \mid 0 \leq x \leq 1\}$.
def plotSlice(t, f):
global lastpaths
xGrainsN = 100
xGrainsNHalf = round (xGrainsN / 2)
xGrains = np.linspace (0, 1, xGrainsN)
xGrains1 = xGrains[0:xGrainsNHalf]
xGrains2 = xGrains[xGrainsNHalf:xGrainsN]
(xs, ys, zs) = extractComponents (list (map (lambda x: f (t, x), xGrains1)))
path1, = ax.plot (xs, ys, zs=zs, c='g')
lastpaths.append (path1)
(xs, ys, zs) = extractComponents (list (map (lambda x: f (t, x), xGrains2)))
path2, = ax.plot (xs, ys, zs=zs, c='k')
lastpaths.append (path2)
def plotCircle(tEnd, y, f):
global lastpaths
tValues = takeUntil (ts, tEnd)
tValuesTail = tValues[-2:]
(xs, ys, zs) = extractComponents (list (map (lambda t: f (y, t), tValues)))
path, = ax.plot (xs, ys, zs=zs, c='g')
lastpaths.append (path)
(xs, ys, zs) = extractComponents (list (map (lambda t: f (y, t), tValuesTail)))
path, = ax.plot (xs, ys, zs=zs, c='k')
lastpaths.append (path)
# Remove last drawn path.
def clearLastPath():
global lastpaths
if lastpaths != False:
for p in lastpaths:
ax.lines.remove(p)
lastpaths = []
# Main drawing function, varying in time.
def plotWpSlices(t):
clearLastPath ()
plotSlice (t, wpPlot)
# Main drawing function, varying in time.
def plotWpPSlices(t):
clearLastPath ()
plotSlice (t, wpPPlot)
def plotWpCircle0(t):
clearLastPath ()
plotCircle (t, 0, wpPlot)
def plotWpCircle1(t):
clearLastPath ()
plotCircle (t, 0.5, wpPlot)
# Construct the list of t's for which we do the plot:
def makeTs(a, b):
ts = list(np.linspace (a, b, 100))
# Pause for a bit:
for i in range(5):
ts.insert(0, a)
ts.append(b)
return ts
# Create the Animation object
ax.axis("off")
drawRiemannSphere()
def init_fig():
print ("computing...")
# Enable the desired plot:
# Plot global behaviour of $\wp$:
ts = makeTs(0, 0.5)
line_ani = animation.FuncAnimation(fig, plotWpSlices, frames=list(ts), interval=100,
init_func=init_fig, blit=False)
# Save as video, if desired.
# line_ani.save('wp-global.mp4')
# Plot global behaviour of $\wp'$:
# ts = makeTs(0, 0.5)
# line_ani = animation.FuncAnimation(fig, plotWpPSlices, frames=list(makeTs(0, 0.5)), interval=300,
# init_func=init_fig, blit=False)
# Save as video, if desired.
# line_ani.save('wpP-global.mp4')
# Plot the behaviour of $\wp$ along the fixed point circle at $y=0$:
# ts = makeTs(0, 1)
# line_ani = animation.FuncAnimation(fig, plotWpCircle0, frames=list(ts), interval=100, blit=False,
# init_func=init_fig)
# Save as video, if desired.
# line_ani.save('wp-circle0.mp4')
# Plot the behaviour of $\wp$ along the fixed point circle at $y=0.5$:
# ts = makeTs(0, 1)
# line_ani = animation.FuncAnimation(fig, plotWpCircle1, frames=list(ts), interval=100, blit=False,
# init_func=init_fig)
# Save as video, if desired.
# line_ani.save('wp-circle1.mp4')
plt.show()
| bsd-3-clause |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/tests/test_groupby.py | 1 | 249119 | # -*- coding: utf-8 -*-
from __future__ import print_function
import nose
from datetime import datetime
from numpy import nan
from pandas import date_range, bdate_range, Timestamp
from pandas.core.index import Index, MultiIndex, CategoricalIndex
from pandas.core.api import Categorical, DataFrame
from pandas.core.groupby import (SpecificationError, DataError, _nargsort,
_lexsort_indexer)
from pandas.core.series import Series
from pandas.core.config import option_context
from pandas.formats.printing import pprint_thing
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_index_equal, assertRaisesRegexp)
from pandas.compat import (range, long, lrange, StringIO, lmap, lzip, map, zip,
builtins, OrderedDict, product as cart_product)
from pandas import compat
from pandas.core.panel import Panel
from pandas.tools.merge import concat
from collections import defaultdict
from functools import partial
import pandas.core.common as com
import numpy as np
import pandas.core.nanops as nanops
import pandas.util.testing as tm
import pandas as pd
from numpy.testing import assert_equal
class TestGroupBy(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.ts = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
self.df_mixed_floats = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.array(
np.random.randn(8), dtype='float32')})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mframe = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self.three_group = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_basic(self):
def checkit(dtype):
data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
self.assertEqual(len(v), 3)
agged = grouped.aggregate(np.mean)
self.assertEqual(agged[1], 1)
assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
assert_series_equal(agged, grouped.mean())
assert_series_equal(grouped.agg(np.sum), grouped.sum())
expected = grouped.apply(lambda x: x * x.sum())
transformed = grouped.transform(lambda x: x * x.sum())
self.assertEqual(transformed[7], 12)
assert_series_equal(transformed, expected)
value_grouped = data.groupby(data)
assert_series_equal(value_grouped.aggregate(np.mean), agged,
check_index_type=False)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
agged = grouped.aggregate({'one': np.mean, 'two': np.std})
group_constants = {0: 10, 1: 20, 2: 30}
agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
self.assertEqual(agged[1], 21)
# corner cases
self.assertRaises(Exception, grouped.aggregate, lambda x: x * 2)
for dtype in ['int64', 'int32', 'float64', 'float32']:
checkit(dtype)
def test_select_bad_cols(self):
df = DataFrame([[1, 2]], columns=['A', 'B'])
g = df.groupby('A')
self.assertRaises(KeyError, g.__getitem__, ['C']) # g[['C']]
self.assertRaises(KeyError, g.__getitem__, ['A', 'C']) # g[['A', 'C']]
with assertRaisesRegexp(KeyError, '^[^A]+$'):
# A should not be referenced as a bad column...
# will have to rethink regex if you change message!
g[['A', 'C']]
def test_first_last_nth(self):
# tests for first / last / nth
grouped = self.df.groupby('A')
first = grouped.first()
expected = self.df.ix[[1, 0], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
nth = grouped.nth(0)
assert_frame_equal(nth, expected)
last = grouped.last()
expected = self.df.ix[[5, 7], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
assert_frame_equal(last, expected)
nth = grouped.nth(-1)
assert_frame_equal(nth, expected)
nth = grouped.nth(1)
expected = self.df.ix[[2, 3], ['B', 'C', 'D']].copy()
expected.index = Index(['foo', 'bar'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# it works!
grouped['B'].first()
grouped['B'].last()
grouped['B'].nth(0)
self.df.loc[self.df['A'] == 'foo', 'B'] = np.nan
self.assertTrue(com.isnull(grouped['B'].first()['foo']))
self.assertTrue(com.isnull(grouped['B'].last()['foo']))
self.assertTrue(com.isnull(grouped['B'].nth(0)['foo']))
# v0.14.0 whatsnew
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
result = g.first()
expected = df.iloc[[1, 2]].set_index('A')
assert_frame_equal(result, expected)
expected = df.iloc[[1, 2]].set_index('A')
result = g.nth(0, dropna='any')
assert_frame_equal(result, expected)
def test_first_last_nth_dtypes(self):
df = self.df_mixed_floats.copy()
df['E'] = True
df['F'] = 1
# tests for first / last / nth
grouped = df.groupby('A')
first = grouped.first()
expected = df.ix[[1, 0], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
last = grouped.last()
expected = df.ix[[5, 7], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(last, expected)
nth = grouped.nth(1)
expected = df.ix[[3, 2], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# GH 2763, first/last shifting dtypes
idx = lrange(10)
idx.append(9)
s = Series(data=lrange(11), index=idx, name='IntCol')
self.assertEqual(s.dtype, 'int64')
f = s.groupby(level=0).first()
self.assertEqual(f.dtype, 'int64')
def test_nth(self):
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
assert_frame_equal(g.nth(0), df.iloc[[0, 2]].set_index('A'))
assert_frame_equal(g.nth(1), df.iloc[[1]].set_index('A'))
assert_frame_equal(g.nth(2), df.loc[[]].set_index('A'))
assert_frame_equal(g.nth(-1), df.iloc[[1, 2]].set_index('A'))
assert_frame_equal(g.nth(-2), df.iloc[[0]].set_index('A'))
assert_frame_equal(g.nth(-3), df.loc[[]].set_index('A'))
assert_series_equal(g.B.nth(0), df.set_index('A').B.iloc[[0, 2]])
assert_series_equal(g.B.nth(1), df.set_index('A').B.iloc[[1]])
assert_frame_equal(g[['B']].nth(0),
df.ix[[0, 2], ['A', 'B']].set_index('A'))
exp = df.set_index('A')
assert_frame_equal(g.nth(0, dropna='any'), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(-1, dropna='any'), exp.iloc[[1, 2]])
exp['B'] = np.nan
assert_frame_equal(g.nth(7, dropna='any'), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(2, dropna='any'), exp.iloc[[1, 2]])
# out of bounds, regression from 0.13.1
# GH 6621
df = DataFrame({'color': {0: 'green',
1: 'green',
2: 'red',
3: 'red',
4: 'red'},
'food': {0: 'ham',
1: 'eggs',
2: 'eggs',
3: 'ham',
4: 'pork'},
'two': {0: 1.5456590000000001,
1: -0.070345000000000005,
2: -2.4004539999999999,
3: 0.46206000000000003,
4: 0.52350799999999997},
'one': {0: 0.56573799999999996,
1: -0.9742360000000001,
2: 1.033801,
3: -0.78543499999999999,
4: 0.70422799999999997}}).set_index(['color',
'food'])
result = df.groupby(level=0, as_index=False).nth(2)
expected = df.iloc[[-1]]
assert_frame_equal(result, expected)
result = df.groupby(level=0, as_index=False).nth(3)
expected = df.loc[[]]
assert_frame_equal(result, expected)
# GH 7559
# from the vbench
df = DataFrame(np.random.randint(1, 10, (100, 2)), dtype='int64')
s = df[1]
g = df[0]
expected = s.groupby(g).first()
expected2 = s.groupby(g).apply(lambda x: x.iloc[0])
assert_series_equal(expected2, expected, check_names=False)
self.assertTrue(expected.name, 0)
self.assertEqual(expected.name, 1)
# validate first
v = s[g == 1].iloc[0]
self.assertEqual(expected.iloc[0], v)
self.assertEqual(expected2.iloc[0], v)
# this is NOT the same as .first (as sorted is default!)
# as it keeps the order in the series (and not the group order)
# related GH 7287
expected = s.groupby(g, sort=False).first()
result = s.groupby(g, sort=False).nth(0, dropna='all')
assert_series_equal(result, expected)
# doc example
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
result = g.B.nth(0, dropna=True)
expected = g.B.first()
assert_series_equal(result, expected)
# test multiple nth values
df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]],
columns=['A', 'B'])
g = df.groupby('A')
assert_frame_equal(g.nth(0), df.iloc[[0, 3]].set_index('A'))
assert_frame_equal(g.nth([0]), df.iloc[[0, 3]].set_index('A'))
assert_frame_equal(g.nth([0, 1]), df.iloc[[0, 1, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, -1]), df.iloc[[0, 2, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
assert_frame_equal(g.nth([2]), df.iloc[[2]].set_index('A'))
assert_frame_equal(g.nth([3, 4]), df.loc[[]].set_index('A'))
business_dates = pd.date_range(start='4/1/2014', end='6/30/2014',
freq='B')
df = DataFrame(1, index=business_dates, columns=['a', 'b'])
# get the first, fourth and last two business days for each month
key = (df.index.year, df.index.month)
result = df.groupby(key, as_index=False).nth([0, 3, -2, -1])
expected_dates = pd.to_datetime(
['2014/4/1', '2014/4/4', '2014/4/29', '2014/4/30', '2014/5/1',
'2014/5/6', '2014/5/29', '2014/5/30', '2014/6/2', '2014/6/5',
'2014/6/27', '2014/6/30'])
expected = DataFrame(1, columns=['a', 'b'], index=expected_dates)
assert_frame_equal(result, expected)
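        # Note (added): nth accepts a list of positions, and negative
        # positions count from the end of each group, which is why
        # [0, 3, -2, -1] picks the first, fourth and last two business
        # days of every (year, month) group above.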
def test_nth_multi_index(self):
# PR 9090, related to issue 8979
# test nth on MultiIndex, should match .first()
grouped = self.three_group.groupby(['A', 'B'])
result = grouped.nth(0)
expected = grouped.first()
assert_frame_equal(result, expected)
def test_nth_multi_index_as_expected(self):
# PR 9090, related to issue 8979
# test nth on MultiIndex
three_group = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny']})
grouped = three_group.groupby(['A', 'B'])
result = grouped.nth(0)
expected = DataFrame(
{'C': ['dull', 'dull', 'dull', 'dull']},
index=MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo'],
['one', 'two', 'one', 'two']],
names=['A', 'B']))
assert_frame_equal(result, expected)
def test_grouper_index_types(self):
# related GH5375
# groupby misbehaving when using a Floatlike index
df = DataFrame(np.arange(10).reshape(5, 2), columns=list('AB'))
for index in [tm.makeFloatIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeIntIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
df.index = index(len(df))
df.groupby(list('abcde')).apply(lambda x: x)
df.index = list(reversed(df.index.tolist()))
df.groupby(list('abcde')).apply(lambda x: x)
def test_grouper_multilevel_freq(self):
# GH 7885
# with level and freq specified in a pd.Grouper
from datetime import date, timedelta
d0 = date.today() - timedelta(days=14)
dates = date_range(d0, date.today())
date_index = pd.MultiIndex.from_product(
[dates, dates], names=['foo', 'bar'])
df = pd.DataFrame(np.random.randint(0, 100, 225), index=date_index)
# Check string level
expected = df.reset_index().groupby([pd.Grouper(
key='foo', freq='W'), pd.Grouper(key='bar', freq='W')]).sum()
# reset index changes columns dtype to object
expected.columns = pd.Index([0], dtype='int64')
result = df.groupby([pd.Grouper(level='foo', freq='W'), pd.Grouper(
level='bar', freq='W')]).sum()
assert_frame_equal(result, expected)
# Check integer level
result = df.groupby([pd.Grouper(level=0, freq='W'), pd.Grouper(
level=1, freq='W')]).sum()
assert_frame_equal(result, expected)
def test_grouper_creation_bug(self):
# GH 8795
df = DataFrame({'A': [0, 0, 1, 1, 2, 2], 'B': [1, 2, 3, 4, 5, 6]})
g = df.groupby('A')
expected = g.sum()
g = df.groupby(pd.Grouper(key='A'))
result = g.sum()
assert_frame_equal(result, expected)
result = g.apply(lambda x: x.sum())
assert_frame_equal(result, expected)
g = df.groupby(pd.Grouper(key='A', axis=0))
result = g.sum()
assert_frame_equal(result, expected)
# GH8866
s = Series(np.arange(8, dtype='int64'),
index=pd.MultiIndex.from_product(
[list('ab'), range(2),
date_range('20130101', periods=2)],
names=['one', 'two', 'three']))
result = s.groupby(pd.Grouper(level='three', freq='M')).sum()
expected = Series([28], index=Index(
[Timestamp('2013-01-31')], freq='M', name='three'))
assert_series_equal(result, expected)
        # just specifying a level used to break
result = s.groupby(pd.Grouper(level='one')).sum()
expected = s.groupby(level='one').sum()
assert_series_equal(result, expected)
def test_grouper_getting_correct_binner(self):
# GH 10063
# using a non-time-based grouper and a time-based grouper
# and specifying levels
df = DataFrame({'A': 1}, index=pd.MultiIndex.from_product(
[list('ab'), date_range('20130101', periods=80)], names=['one',
'two']))
result = df.groupby([pd.Grouper(level='one'), pd.Grouper(
level='two', freq='M')]).sum()
expected = DataFrame({'A': [31, 28, 21, 31, 28, 21]},
index=MultiIndex.from_product(
[list('ab'),
date_range('20130101', freq='M', periods=3)],
names=['one', 'two']))
assert_frame_equal(result, expected)
def test_grouper_iter(self):
self.assertEqual(sorted(self.df.groupby('A').grouper), ['bar', 'foo'])
def test_empty_groups(self):
# GH # 1048
self.assertRaises(ValueError, self.df.groupby, [])
def test_groupby_grouper(self):
grouped = self.df.groupby('A')
result = self.df.groupby(grouped.grouper).mean()
expected = grouped.mean()
assert_frame_equal(result, expected)
def test_groupby_duplicated_column_errormsg(self):
# GH7511
df = DataFrame(columns=['A', 'B', 'A', 'C'],
data=[range(4), range(2, 6), range(0, 8, 2)])
self.assertRaises(ValueError, df.groupby, 'A')
self.assertRaises(ValueError, df.groupby, ['A', 'B'])
grouped = df.groupby('B')
c = grouped.count()
self.assertTrue(c.columns.nlevels == 1)
self.assertTrue(c.columns.size == 3)
def test_groupby_dict_mapping(self):
# GH #679
from pandas import Series
s = Series({'T1': 5})
result = s.groupby({'T1': 'T2'}).agg(sum)
expected = s.groupby(['T2']).agg(sum)
assert_series_equal(result, expected)
s = Series([1., 2., 3., 4.], index=list('abcd'))
mapping = {'a': 0, 'b': 0, 'c': 1, 'd': 1}
result = s.groupby(mapping).mean()
result2 = s.groupby(mapping).agg(np.mean)
expected = s.groupby([0, 0, 1, 1]).mean()
expected2 = s.groupby([0, 0, 1, 1]).mean()
assert_series_equal(result, expected)
assert_series_equal(result, result2)
assert_series_equal(result, expected2)
def test_groupby_bounds_check(self):
# groupby_X is code-generated, so if one variant
        # does, the rest probably do too
a = np.array([1, 2], dtype='object')
b = np.array([1, 2, 3], dtype='object')
self.assertRaises(AssertionError, pd.algos.groupby_object, a, b)
def test_groupby_grouper_f_sanity_checked(self):
dates = date_range('01-Jan-2013', periods=12, freq='MS')
ts = Series(np.random.randn(12), index=dates)
# GH3035
# index.map is used to apply grouper to the index
# if it fails on the elements, map tries it on the entire index as
# a sequence. That can yield invalid results that cause trouble
# down the line.
        # the surprise comes from using key[0:6] rather than str(key)[0:6]
# when the elements are Timestamp.
# the result is Index[0:6], very confusing.
self.assertRaises(AssertionError, ts.groupby, lambda key: key[0:6])
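        # Illustrative note (not from the original test): a Timestamp is
        # not sliceable, so key[0:6] raises on each element and map falls
        # back to slicing the whole index; a working grouper would look
        # more like lambda key: str(key)[0:6].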
def test_groupby_nonobject_dtype(self):
key = self.mframe.index.labels[0]
grouped = self.mframe.groupby(key)
result = grouped.sum()
expected = self.mframe.groupby(key.astype('O')).sum()
assert_frame_equal(result, expected)
# GH 3911, mixed frame non-conversion
df = self.df_mixed_floats.copy()
df['value'] = lrange(len(df))
def max_value(group):
return group.ix[group['value'].idxmax()]
applied = df.groupby('A').apply(max_value)
result = applied.get_dtype_counts().sort_values()
expected = Series({'object': 2,
'float64': 2,
'int64': 1}).sort_values()
assert_series_equal(result, expected)
def test_groupby_return_type(self):
# GH2893, return a reduced type
df1 = DataFrame([{"val1": 1,
"val2": 20}, {"val1": 1,
"val2": 19}, {"val1": 2,
"val2": 27}, {"val1": 2,
"val2": 12}
])
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
result = df1.groupby("val1", squeeze=True).apply(func)
tm.assertIsInstance(result, Series)
df2 = DataFrame([{"val1": 1,
"val2": 20}, {"val1": 1,
"val2": 19}, {"val1": 1,
"val2": 27}, {"val1": 1,
"val2": 12}
])
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
result = df2.groupby("val1", squeeze=True).apply(func)
tm.assertIsInstance(result, Series)
# GH3596, return a consistent type (regression in 0.11 from 0.10.1)
df = DataFrame([[1, 1], [1, 1]], columns=['X', 'Y'])
result = df.groupby('X', squeeze=False).count()
tm.assertIsInstance(result, DataFrame)
# GH5592
        # inconsistent return type
df = DataFrame(dict(A=['Tiger', 'Tiger', 'Tiger', 'Lamb', 'Lamb',
'Pony', 'Pony'], B=Series(
np.arange(7), dtype='int64'), C=date_range(
'20130101', periods=7)))
def f(grp):
return grp.iloc[0]
expected = df.groupby('A').first()[['B']]
result = df.groupby('A').apply(f)[['B']]
assert_frame_equal(result, expected)
def f(grp):
if grp.name == 'Tiger':
return None
return grp.iloc[0]
result = df.groupby('A').apply(f)[['B']]
e = expected.copy()
e.loc['Tiger'] = np.nan
assert_frame_equal(result, e)
def f(grp):
if grp.name == 'Pony':
return None
return grp.iloc[0]
result = df.groupby('A').apply(f)[['B']]
e = expected.copy()
e.loc['Pony'] = np.nan
assert_frame_equal(result, e)
# 5592 revisited, with datetimes
def f(grp):
if grp.name == 'Pony':
return None
return grp.iloc[0]
result = df.groupby('A').apply(f)[['C']]
e = df.groupby('A').first()[['C']]
e.loc['Pony'] = pd.NaT
assert_frame_equal(result, e)
# scalar outputs
def f(grp):
if grp.name == 'Pony':
return None
return grp.iloc[0].loc['C']
result = df.groupby('A').apply(f)
e = df.groupby('A').first()['C'].copy()
e.loc['Pony'] = np.nan
e.name = None
assert_series_equal(result, e)
def test_agg_api(self):
# GH 6337
# http://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error
# different api for agg when passed custom function with mixed frame
df = DataFrame({'data1': np.random.randn(5),
'data2': np.random.randn(5),
'key1': ['a', 'a', 'b', 'b', 'a'],
'key2': ['one', 'two', 'one', 'two', 'one']})
grouped = df.groupby('key1')
def peak_to_peak(arr):
return arr.max() - arr.min()
expected = grouped.agg([peak_to_peak])
expected.columns = ['data1', 'data2']
result = grouped.agg(peak_to_peak)
assert_frame_equal(result, expected)
def test_agg_regression1(self):
grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
def test_agg_datetimes_mixed(self):
data = [[1, '2012-01-01', 1.0], [2, '2012-01-02', 2.0], [3, None, 3.0]]
df1 = DataFrame({'key': [x[0] for x in data],
'date': [x[1] for x in data],
'value': [x[2] for x in data]})
data = [[row[0], datetime.strptime(row[1], '%Y-%m-%d').date() if row[1]
else None, row[2]] for row in data]
df2 = DataFrame({'key': [x[0] for x in data],
'date': [x[1] for x in data],
'value': [x[2] for x in data]})
df1['weights'] = df1['value'] / df1['value'].sum()
gb1 = df1.groupby('date').aggregate(np.sum)
df2['weights'] = df1['value'] / df1['value'].sum()
gb2 = df2.groupby('date').aggregate(np.sum)
assert (len(gb1) == len(gb2))
def test_agg_period_index(self):
from pandas import period_range, PeriodIndex
prng = period_range('2012-1-1', freq='M', periods=3)
df = DataFrame(np.random.randn(3, 2), index=prng)
rs = df.groupby(level=0).sum()
tm.assertIsInstance(rs.index, PeriodIndex)
# GH 3579
index = period_range(start='1999-01', periods=5, freq='M')
s1 = Series(np.random.rand(len(index)), index=index)
s2 = Series(np.random.rand(len(index)), index=index)
series = [('s1', s1), ('s2', s2)]
df = DataFrame.from_items(series)
grouped = df.groupby(df.index.month)
list(grouped)
def test_agg_must_agg(self):
grouped = self.df.groupby('A')['C']
self.assertRaises(Exception, grouped.agg, lambda x: x.describe())
self.assertRaises(Exception, grouped.agg, lambda x: x.index[:2])
def test_agg_ser_multi_key(self):
# TODO(wesm): unused
ser = self.df.C # noqa
f = lambda x: x.sum()
results = self.df.C.groupby([self.df.A, self.df.B]).aggregate(f)
expected = self.df.groupby(['A', 'B']).sum()['C']
assert_series_equal(results, expected)
def test_get_group(self):
wp = tm.makePanel()
grouped = wp.groupby(lambda x: x.month, axis='major')
gp = grouped.get_group(1)
expected = wp.reindex(major=[x for x in wp.major_axis if x.month == 1])
assert_panel_equal(gp, expected)
# GH 5267
# be datelike friendly
df = DataFrame({'DATE': pd.to_datetime(
['10-Oct-2013', '10-Oct-2013', '10-Oct-2013', '11-Oct-2013',
'11-Oct-2013', '11-Oct-2013']),
'label': ['foo', 'foo', 'bar', 'foo', 'foo', 'bar'],
'VAL': [1, 2, 3, 4, 5, 6]})
g = df.groupby('DATE')
key = list(g.groups)[0]
result1 = g.get_group(key)
result2 = g.get_group(Timestamp(key).to_datetime())
result3 = g.get_group(str(Timestamp(key)))
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
g = df.groupby(['DATE', 'label'])
key = list(g.groups)[0]
result1 = g.get_group(key)
result2 = g.get_group((Timestamp(key[0]).to_datetime(), key[1]))
result3 = g.get_group((str(Timestamp(key[0])), key[1]))
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
# must pass a same-length tuple with multiple keys
self.assertRaises(ValueError, lambda: g.get_group('foo'))
self.assertRaises(ValueError, lambda: g.get_group(('foo')))
self.assertRaises(ValueError,
lambda: g.get_group(('foo', 'bar', 'baz')))
def test_get_group_grouped_by_tuple(self):
# GH 8121
df = DataFrame([[(1, ), (1, 2), (1, ), (1, 2)]], index=['ids']).T
gr = df.groupby('ids')
expected = DataFrame({'ids': [(1, ), (1, )]}, index=[0, 2])
result = gr.get_group((1, ))
assert_frame_equal(result, expected)
dt = pd.to_datetime(['2010-01-01', '2010-01-02', '2010-01-01',
'2010-01-02'])
df = DataFrame({'ids': [(x, ) for x in dt]})
gr = df.groupby('ids')
result = gr.get_group(('2010-01-01', ))
expected = DataFrame({'ids': [(dt[0], ), (dt[0], )]}, index=[0, 2])
assert_frame_equal(result, expected)
def test_agg_apply_corner(self):
# nothing to group, all NA
grouped = self.ts.groupby(self.ts * np.nan)
self.assertEqual(self.ts.dtype, np.float64)
# groupby float64 values results in Float64Index
exp = Series([], dtype=np.float64, index=pd.Index(
[], dtype=np.float64))
assert_series_equal(grouped.sum(), exp)
assert_series_equal(grouped.agg(np.sum), exp)
assert_series_equal(grouped.apply(np.sum), exp, check_index_type=False)
# DataFrame
grouped = self.tsframe.groupby(self.tsframe['A'] * np.nan)
exp_df = DataFrame(columns=self.tsframe.columns, dtype=float,
index=pd.Index(
[], dtype=np.float64))
assert_frame_equal(grouped.sum(), exp_df, check_names=False)
assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False)
assert_frame_equal(grouped.apply(np.sum), DataFrame({}, dtype=float))
def test_agg_grouping_is_list_tuple(self):
from pandas.core.groupby import Grouping
df = tm.makeTimeDataFrame()
grouped = df.groupby(lambda x: x.year)
grouper = grouped.grouper.groupings[0].grouper
grouped.grouper.groupings[0] = Grouping(self.ts.index, list(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped.grouper.groupings[0] = Grouping(self.ts.index, tuple(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_grouping_error_on_multidim_input(self):
from pandas.core.groupby import Grouping
self.assertRaises(ValueError,
Grouping, self.df.index, self.df[['A', 'A']])
def test_agg_python_multiindex(self):
grouped = self.mframe.groupby(['A', 'B'])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_apply_describe_bug(self):
grouped = self.mframe.groupby(level='first')
grouped.describe() # it works!
def test_apply_issues(self):
# GH 5788
s = """2011.05.16,00:00,1.40893
2011.05.16,01:00,1.40760
2011.05.16,02:00,1.40750
2011.05.16,03:00,1.40649
2011.05.17,02:00,1.40893
2011.05.17,03:00,1.40760
2011.05.17,04:00,1.40750
2011.05.17,05:00,1.40649
2011.05.18,02:00,1.40893
2011.05.18,03:00,1.40760
2011.05.18,04:00,1.40750
2011.05.18,05:00,1.40649"""
df = pd.read_csv(
StringIO(s), header=None, names=['date', 'time', 'value'],
parse_dates=[['date', 'time']])
df = df.set_index('date_time')
expected = df.groupby(df.index.date).idxmax()
result = df.groupby(df.index.date).apply(lambda x: x.idxmax())
assert_frame_equal(result, expected)
# GH 5789
# don't auto coerce dates
df = pd.read_csv(
StringIO(s), header=None, names=['date', 'time', 'value'])
exp_idx = pd.Index(
['2011.05.16', '2011.05.17', '2011.05.18'
], dtype=object, name='date')
expected = Series(['00:00', '02:00', '02:00'], index=exp_idx)
result = df.groupby('date').apply(
lambda x: x['time'][x['value'].idxmax()])
assert_series_equal(result, expected)
def test_time_field_bug(self):
        # Test a fix for the error described in GH issue 11324: when
        # non-key fields in a group-by dataframe contained time-based
        # fields that were not returned by the apply function, an
        # exception would be raised.
df = pd.DataFrame({'a': 1, 'b': [datetime.now() for nn in range(10)]})
def func_with_no_date(batch):
return pd.Series({'c': 2})
def func_with_date(batch):
return pd.Series({'c': 2, 'b': datetime(2015, 1, 1)})
dfg_no_conversion = df.groupby(by=['a']).apply(func_with_no_date)
dfg_no_conversion_expected = pd.DataFrame({'c': 2}, index=[1])
dfg_no_conversion_expected.index.name = 'a'
dfg_conversion = df.groupby(by=['a']).apply(func_with_date)
dfg_conversion_expected = pd.DataFrame(
{'b': datetime(2015, 1, 1),
'c': 2}, index=[1])
dfg_conversion_expected.index.name = 'a'
self.assert_frame_equal(dfg_no_conversion, dfg_no_conversion_expected)
self.assert_frame_equal(dfg_conversion, dfg_conversion_expected)
def test_len(self):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day])
self.assertEqual(len(grouped), len(df))
grouped = df.groupby([lambda x: x.year, lambda x: x.month])
expected = len(set([(x.year, x.month) for x in df.index]))
self.assertEqual(len(grouped), expected)
# issue 11016
df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3]))
self.assertEqual(len(df.groupby(('a'))), 0)
self.assertEqual(len(df.groupby(('b'))), 3)
self.assertEqual(len(df.groupby(('a', 'b'))), 3)
def test_groups(self):
grouped = self.df.groupby(['A'])
groups = grouped.groups
self.assertIs(groups, grouped.groups) # caching works
for k, v in compat.iteritems(grouped.groups):
self.assertTrue((self.df.ix[v]['A'] == k).all())
grouped = self.df.groupby(['A', 'B'])
groups = grouped.groups
self.assertIs(groups, grouped.groups) # caching works
for k, v in compat.iteritems(grouped.groups):
self.assertTrue((self.df.ix[v]['A'] == k[0]).all())
self.assertTrue((self.df.ix[v]['B'] == k[1]).all())
def test_aggregate_str_func(self):
def _check_results(grouped):
# single series
result = grouped['A'].agg('std')
expected = grouped['A'].std()
assert_series_equal(result, expected)
# group frame by function name
result = grouped.aggregate('var')
expected = grouped.var()
assert_frame_equal(result, expected)
# group frame by function dict
result = grouped.agg(OrderedDict([['A', 'var'], ['B', 'std'],
['C', 'mean'], ['D', 'sem']]))
expected = DataFrame(OrderedDict([['A', grouped['A'].var(
)], ['B', grouped['B'].std()], ['C', grouped['C'].mean()],
['D', grouped['D'].sem()]]))
assert_frame_equal(result, expected)
by_weekday = self.tsframe.groupby(lambda x: x.weekday())
_check_results(by_weekday)
by_mwkday = self.tsframe.groupby([lambda x: x.month,
lambda x: x.weekday()])
_check_results(by_mwkday)
def test_aggregate_item_by_item(self):
df = self.df.copy()
df['E'] = ['a'] * len(self.df)
grouped = self.df.groupby('A')
# API change in 0.11
# def aggfun(ser):
# return len(ser + 'a')
# result = grouped.agg(aggfun)
# self.assertEqual(len(result.columns), 1)
aggfun = lambda ser: ser.size
result = grouped.agg(aggfun)
foo = (self.df.A == 'foo').sum()
bar = (self.df.A == 'bar').sum()
K = len(result.columns)
# GH5782
        # odd comparisons can result here, so cast to make the comparison easy
exp = pd.Series(np.array([foo] * K), index=list('BCD'),
dtype=np.float64, name='foo')
tm.assert_series_equal(result.xs('foo'), exp)
exp = pd.Series(np.array([bar] * K), index=list('BCD'),
dtype=np.float64, name='bar')
tm.assert_almost_equal(result.xs('bar'), exp)
def aggfun(ser):
return ser.size
result = DataFrame().groupby(self.df.A).agg(aggfun)
tm.assertIsInstance(result, DataFrame)
self.assertEqual(len(result), 0)
def test_agg_item_by_item_raise_typeerror(self):
from numpy.random import randint
df = DataFrame(randint(10, size=(20, 10)))
def raiseException(df):
pprint_thing('----------------------------------------')
pprint_thing(df.to_string())
raise TypeError
self.assertRaises(TypeError, df.groupby(0).agg, raiseException)
def test_basic_regression(self):
# regression
T = [1.0 * x for x in lrange(1, 10) * 10][:1095]
result = Series(T, lrange(0, len(T)))
groupings = np.random.random((1100, ))
groupings = Series(groupings, lrange(0, len(groupings))) * 10.
grouped = result.groupby(groupings)
grouped.mean()
def test_transform(self):
data = Series(np.arange(9) // 3, index=np.arange(9))
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
transformed = grouped.transform(lambda x: x * x.sum())
self.assertEqual(transformed[7], 12)
# GH 8046
# make sure that we preserve the input order
df = DataFrame(
np.arange(6, dtype='int64').reshape(
3, 2), columns=["a", "b"], index=[0, 2, 1])
key = [0, 0, 1]
expected = df.sort_index().groupby(key).transform(
lambda x: x - x.mean()).groupby(key).mean()
result = df.groupby(key).transform(lambda x: x - x.mean()).groupby(
key).mean()
assert_frame_equal(result, expected)
def demean(arr):
return arr - arr.mean()
people = DataFrame(np.random.randn(5, 5),
columns=['a', 'b', 'c', 'd', 'e'],
index=['Joe', 'Steve', 'Wes', 'Jim', 'Travis'])
key = ['one', 'two', 'one', 'two', 'one']
result = people.groupby(key).transform(demean).groupby(key).mean()
expected = people.groupby(key).apply(demean).groupby(key).mean()
assert_frame_equal(result, expected)
# GH 8430
df = tm.makeTimeDataFrame()
g = df.groupby(pd.TimeGrouper('M'))
g.transform(lambda x: x - 1)
# GH 9700
df = DataFrame({'a': range(5, 10), 'b': range(5)})
result = df.groupby('a').transform(max)
expected = DataFrame({'b': range(5)})
tm.assert_frame_equal(result, expected)
def test_transform_fast(self):
df = DataFrame({'id': np.arange(100000) / 3,
'val': np.random.randn(100000)})
grp = df.groupby('id')['val']
values = np.repeat(grp.mean().values,
com._ensure_platform_int(grp.count().values))
expected = pd.Series(values, index=df.index)
result = grp.transform(np.mean)
assert_series_equal(result, expected)
result = grp.transform('mean')
assert_series_equal(result, expected)
def test_transform_broadcast(self):
grouped = self.ts.groupby(lambda x: x.month)
result = grouped.transform(np.mean)
self.assertTrue(result.index.equals(self.ts.index))
for _, gp in grouped:
assert_fp_equal(result.reindex(gp.index), gp.mean())
grouped = self.tsframe.groupby(lambda x: x.month)
result = grouped.transform(np.mean)
self.assertTrue(result.index.equals(self.tsframe.index))
for _, gp in grouped:
agged = gp.mean()
res = result.reindex(gp.index)
for col in self.tsframe:
assert_fp_equal(res[col], agged[col])
# group columns
grouped = self.tsframe.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},
axis=1)
result = grouped.transform(np.mean)
self.assertTrue(result.index.equals(self.tsframe.index))
self.assertTrue(result.columns.equals(self.tsframe.columns))
for _, gp in grouped:
agged = gp.mean(1)
res = result.reindex(columns=gp.columns)
for idx in gp.index:
assert_fp_equal(res.xs(idx), agged[idx])
def test_transform_axis(self):
# make sure that we are setting the axes
# correctly when on axis=0 or 1
# in the presence of a non-monotonic indexer
# GH12713
base = self.tsframe.iloc[0:5]
r = len(base.index)
c = len(base.columns)
tso = DataFrame(np.random.randn(r, c),
index=base.index,
columns=base.columns,
dtype='float64')
# monotonic
ts = tso
grouped = ts.groupby(lambda x: x.weekday())
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: x - x.mean())
assert_frame_equal(result, expected)
ts = ts.T
grouped = ts.groupby(lambda x: x.weekday(), axis=1)
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
assert_frame_equal(result, expected)
# non-monotonic
ts = tso.iloc[[1, 0] + list(range(2, len(base)))]
grouped = ts.groupby(lambda x: x.weekday())
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: x - x.mean())
assert_frame_equal(result, expected)
ts = ts.T
grouped = ts.groupby(lambda x: x.weekday(), axis=1)
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
assert_frame_equal(result, expected)
def test_transform_dtype(self):
# GH 9807
# Check transform dtype output is preserved
df = DataFrame([[1, 3], [2, 3]])
result = df.groupby(1).transform('mean')
expected = DataFrame([[1.5], [1.5]])
assert_frame_equal(result, expected)
def test_transform_bug(self):
# GH 5712
# transforming on a datetime column
df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
result = df.groupby('A')['B'].transform(
lambda x: x.rank(ascending=False))
expected = Series(np.arange(5, 0, step=-1), name='B')
assert_series_equal(result, expected)
def test_transform_multiple(self):
grouped = self.ts.groupby([lambda x: x.year, lambda x: x.month])
grouped.transform(lambda x: x * 2)
grouped.transform(np.mean)
def test_dispatch_transform(self):
df = self.tsframe[::5].reindex(self.tsframe.index)
grouped = df.groupby(lambda x: x.month)
filled = grouped.fillna(method='pad')
fillit = lambda x: x.fillna(method='pad')
expected = df.groupby(lambda x: x.month).transform(fillit)
assert_frame_equal(filled, expected)
def test_transform_select_columns(self):
f = lambda x: x.mean()
result = self.df.groupby('A')['C', 'D'].transform(f)
selection = self.df[['C', 'D']]
expected = selection.groupby(self.df['A']).transform(f)
assert_frame_equal(result, expected)
def test_transform_exclude_nuisance(self):
        # this also tests that orderings in transform are consistent
        # between Series and DataFrame
expected = {}
grouped = self.df.groupby('A')
expected['C'] = grouped['C'].transform(np.mean)
expected['D'] = grouped['D'].transform(np.mean)
expected = DataFrame(expected)
result = self.df.groupby('A').transform(np.mean)
assert_frame_equal(result, expected)
def test_transform_function_aliases(self):
result = self.df.groupby('A').transform('mean')
expected = self.df.groupby('A').transform(np.mean)
assert_frame_equal(result, expected)
result = self.df.groupby('A')['C'].transform('mean')
expected = self.df.groupby('A')['C'].transform(np.mean)
assert_series_equal(result, expected)
def test_transform_length(self):
# GH 9697
df = pd.DataFrame({'col1': [1, 1, 2, 2], 'col2': [1, 2, 3, np.nan]})
expected = pd.Series([3.0] * 4)
def nsum(x):
return np.nansum(x)
results = [df.groupby('col1').transform(sum)['col2'],
df.groupby('col1')['col2'].transform(sum),
df.groupby('col1').transform(nsum)['col2'],
df.groupby('col1')['col2'].transform(nsum)]
for result in results:
assert_series_equal(result, expected, check_names=False)
def test_with_na(self):
index = Index(np.arange(10))
for dtype in ['float64', 'float32', 'int64', 'int32', 'int16', 'int8']:
values = Series(np.ones(10), index, dtype=dtype)
labels = Series([nan, 'foo', 'bar', 'bar', nan, nan, 'bar',
'bar', nan, 'foo'], index=index)
# this SHOULD be an int
grouped = values.groupby(labels)
agged = grouped.agg(len)
expected = Series([4, 2], index=['bar', 'foo'])
assert_series_equal(agged, expected, check_dtype=False)
# self.assertTrue(issubclass(agged.dtype.type, np.integer))
            # explicitly return a float from my function
def f(x):
return float(len(x))
agged = grouped.agg(f)
expected = Series([4, 2], index=['bar', 'foo'])
assert_series_equal(agged, expected, check_dtype=False)
self.assertTrue(issubclass(agged.dtype.type, np.dtype(dtype).type))
def test_groupby_transform_with_int(self):
# GH 3740, make sure that we might upcast on item-by-item transform
# floats
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=Series(1, dtype='float64'),
C=Series(
[1, 2, 3, 1, 2, 3], dtype='float64'), D='foo'))
result = df.groupby('A').transform(lambda x: (x - x.mean()) / x.std())
expected = DataFrame(dict(B=np.nan, C=Series(
[-1, 0, 1, -1, 0, 1], dtype='float64')))
assert_frame_equal(result, expected)
# int case
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1,
C=[1, 2, 3, 1, 2, 3], D='foo'))
result = df.groupby('A').transform(lambda x: (x - x.mean()) / x.std())
expected = DataFrame(dict(B=np.nan, C=[-1, 0, 1, -1, 0, 1]))
assert_frame_equal(result, expected)
# int that needs float conversion
s = Series([2, 3, 4, 10, 5, -1])
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1, C=s, D='foo'))
result = df.groupby('A').transform(lambda x: (x - x.mean()) / x.std())
s1 = s.iloc[0:3]
s1 = (s1 - s1.mean()) / s1.std()
s2 = s.iloc[3:6]
s2 = (s2 - s2.mean()) / s2.std()
expected = DataFrame(dict(B=np.nan, C=concat([s1, s2])))
assert_frame_equal(result, expected)
# int downcasting
result = df.groupby('A').transform(lambda x: x * 2 / 2)
expected = DataFrame(dict(B=1, C=[2, 3, 4, 10, 5, -1]))
assert_frame_equal(result, expected)
def test_indices_concatenation_order(self):
# GH 2808
def f1(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(levels=[[]] * 2, labels=[[]] * 2,
names=['b', 'c'])
res = DataFrame(None, columns=['a'], index=multiindex)
return res
else:
y = y.set_index(['b', 'c'])
return y
def f2(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
return DataFrame()
else:
y = y.set_index(['b', 'c'])
return y
def f3(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(levels=[[]] * 2, labels=[[]] * 2,
names=['foo', 'bar'])
res = DataFrame(None, columns=['a', 'b'], index=multiindex)
return res
else:
return y
df = DataFrame({'a': [1, 2, 2, 2], 'b': lrange(4), 'c': lrange(5, 9)})
df2 = DataFrame({'a': [3, 2, 2, 2], 'b': lrange(4), 'c': lrange(5, 9)})
# correct result
result1 = df.groupby('a').apply(f1)
result2 = df2.groupby('a').apply(f1)
assert_frame_equal(result1, result2)
# should fail (not the same number of levels)
self.assertRaises(AssertionError, df.groupby('a').apply, f2)
self.assertRaises(AssertionError, df2.groupby('a').apply, f2)
# should fail (incorrect shape)
self.assertRaises(AssertionError, df.groupby('a').apply, f3)
self.assertRaises(AssertionError, df2.groupby('a').apply, f3)
def test_attr_wrapper(self):
grouped = self.ts.groupby(lambda x: x.weekday())
result = grouped.std()
expected = grouped.agg(lambda x: np.std(x, ddof=1))
assert_series_equal(result, expected)
# this is pretty cool
result = grouped.describe()
expected = {}
for name, gp in grouped:
expected[name] = gp.describe()
expected = DataFrame(expected).T
assert_frame_equal(result.unstack(), expected)
# get attribute
result = grouped.dtype
expected = grouped.agg(lambda x: x.dtype)
# make sure raises error
self.assertRaises(AttributeError, getattr, grouped, 'foo')
def test_series_describe_multikey(self):
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe().unstack()
assert_series_equal(result['mean'], grouped.mean(), check_names=False)
assert_series_equal(result['std'], grouped.std(), check_names=False)
assert_series_equal(result['min'], grouped.min(), check_names=False)
def test_series_describe_single(self):
ts = tm.makeTimeSeries()
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x.describe())
expected = grouped.describe()
assert_series_equal(result, expected)
def test_series_agg_multikey(self):
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.sum)
expected = grouped.sum()
assert_series_equal(result, expected)
def test_series_agg_multi_pure_python(self):
data = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def bad(x):
assert (len(x.base) > 0)
return 'foo'
result = data.groupby(['A', 'B']).agg(bad)
expected = data.groupby(['A', 'B']).agg(lambda x: 'foo')
assert_frame_equal(result, expected)
def test_series_index_name(self):
grouped = self.df.ix[:, ['C']].groupby(self.df['A'])
result = grouped.agg(lambda x: x.mean())
self.assertEqual(result.index.name, 'A')
def test_frame_describe_multikey(self):
grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
for col in self.tsframe:
expected = grouped[col].describe()
assert_series_equal(result[col], expected, check_names=False)
groupedT = self.tsframe.groupby({'A': 0, 'B': 0,
'C': 1, 'D': 1}, axis=1)
result = groupedT.describe()
for name, group in groupedT:
assert_frame_equal(result[name], group.describe())
def test_frame_groupby(self):
grouped = self.tsframe.groupby(lambda x: x.weekday())
# aggregate
aggregated = grouped.aggregate(np.mean)
self.assertEqual(len(aggregated), 5)
self.assertEqual(len(aggregated.columns), 4)
# by string
tscopy = self.tsframe.copy()
tscopy['weekday'] = [x.weekday() for x in tscopy.index]
stragged = tscopy.groupby('weekday').aggregate(np.mean)
assert_frame_equal(stragged, aggregated, check_names=False)
# transform
grouped = self.tsframe.head(30).groupby(lambda x: x.weekday())
transformed = grouped.transform(lambda x: x - x.mean())
self.assertEqual(len(transformed), 30)
self.assertEqual(len(transformed.columns), 4)
# transform propagate
transformed = grouped.transform(lambda x: x.mean())
for name, group in grouped:
mean = group.mean()
for idx in group.index:
tm.assert_series_equal(transformed.xs(idx), mean,
check_names=False)
# iterate
for weekday, group in grouped:
self.assertEqual(group.index[0].weekday(), weekday)
# groups / group_indices
groups = grouped.groups
indices = grouped.indices
for k, v in compat.iteritems(groups):
samething = self.tsframe.index.take(indices[k])
self.assertTrue((samething == v).all())
def test_grouping_is_iterable(self):
# this code path isn't used anywhere else
# not sure it's useful
grouped = self.tsframe.groupby([lambda x: x.weekday(), lambda x: x.year
])
# test it works
for g in grouped.grouper.groupings[0]:
pass
def test_frame_groupby_columns(self):
mapping = {'A': 0, 'B': 0, 'C': 1, 'D': 1}
grouped = self.tsframe.groupby(mapping, axis=1)
# aggregate
aggregated = grouped.aggregate(np.mean)
self.assertEqual(len(aggregated), len(self.tsframe))
self.assertEqual(len(aggregated.columns), 2)
# transform
tf = lambda x: x - x.mean()
groupedT = self.tsframe.T.groupby(mapping, axis=0)
assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
# iterate
for k, v in grouped:
self.assertEqual(len(v.columns), 2)
def test_frame_set_name_single(self):
grouped = self.df.groupby('A')
result = grouped.mean()
self.assertEqual(result.index.name, 'A')
result = self.df.groupby('A', as_index=False).mean()
self.assertNotEqual(result.index.name, 'A')
result = grouped.agg(np.mean)
self.assertEqual(result.index.name, 'A')
result = grouped.agg({'C': np.mean, 'D': np.std})
self.assertEqual(result.index.name, 'A')
result = grouped['C'].mean()
self.assertEqual(result.index.name, 'A')
result = grouped['C'].agg(np.mean)
self.assertEqual(result.index.name, 'A')
result = grouped['C'].agg([np.mean, np.std])
self.assertEqual(result.index.name, 'A')
result = grouped['C'].agg({'foo': np.mean, 'bar': np.std})
self.assertEqual(result.index.name, 'A')
def test_aggregate_api_consistency(self):
# GH 9052
# make sure that the aggregates via dict
# are consistent
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
grouped = df.groupby(['A', 'B'])
c_mean = grouped['C'].mean()
c_sum = grouped['C'].sum()
d_mean = grouped['D'].mean()
d_sum = grouped['D'].sum()
result = grouped['D'].agg(['sum', 'mean'])
expected = pd.concat([d_sum, d_mean],
axis=1)
expected.columns = ['sum', 'mean']
assert_frame_equal(result, expected, check_like=True)
result = grouped.agg([np.sum, np.mean])
expected = pd.concat([c_sum,
c_mean,
d_sum,
d_mean],
axis=1)
expected.columns = MultiIndex.from_product([['C', 'D'],
['sum', 'mean']])
assert_frame_equal(result, expected, check_like=True)
result = grouped[['D', 'C']].agg([np.sum, np.mean])
expected = pd.concat([d_sum,
d_mean,
c_sum,
c_mean],
axis=1)
expected.columns = MultiIndex.from_product([['D', 'C'],
['sum', 'mean']])
assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({'C': 'mean', 'D': 'sum'})
expected = pd.concat([d_sum,
c_mean],
axis=1)
assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({'C': ['mean', 'sum'],
'D': ['mean', 'sum']})
expected = pd.concat([c_mean,
c_sum,
d_mean,
d_sum],
axis=1)
        expected.columns = MultiIndex.from_product([['C', 'D'],
                                                    ['mean', 'sum']])
        assert_frame_equal(result, expected, check_like=True)
result = grouped[['D', 'C']].agg({'r': np.sum,
'r2': np.mean})
expected = pd.concat([d_sum,
c_sum,
d_mean,
c_mean],
axis=1)
expected.columns = MultiIndex.from_product([['r', 'r2'],
['D', 'C']])
assert_frame_equal(result, expected, check_like=True)
def test_agg_compat(self):
# GH 12334
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
g = df.groupby(['A', 'B'])
expected = pd.concat([g['D'].sum(),
g['D'].std()],
axis=1)
expected.columns = MultiIndex.from_tuples([('C', 'sum'),
('C', 'std')])
result = g['D'].agg({'C': ['sum', 'std']})
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([g['D'].sum(),
g['D'].std()],
axis=1)
expected.columns = ['C', 'D']
result = g['D'].agg({'C': 'sum', 'D': 'std'})
assert_frame_equal(result, expected, check_like=True)
def test_agg_nested_dicts(self):
# API change for disallowing these types of nested dicts
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
g = df.groupby(['A', 'B'])
def f():
g.aggregate({'r1': {'C': ['mean', 'sum']},
'r2': {'D': ['mean', 'sum']}})
self.assertRaises(SpecificationError, f)
result = g.agg({'C': {'ra': ['mean', 'std']},
'D': {'rb': ['mean', 'std']}})
expected = pd.concat([g['C'].mean(), g['C'].std(), g['D'].mean(),
g['D'].std()], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), (
'ra', 'std'), ('rb', 'mean'), ('rb', 'std')])
assert_frame_equal(result, expected, check_like=True)
# same name as the original column
# GH9052
expected = g['D'].agg({'result1': np.sum, 'result2': np.mean})
expected = expected.rename(columns={'result1': 'D'})
result = g['D'].agg({'D': np.sum, 'result2': np.mean})
assert_frame_equal(result, expected, check_like=True)
def test_multi_iter(self):
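        # iterating a Series groupby with two keys yields ((k1, k2), group)
        # pairs in sorted key order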
s = Series(np.arange(6))
k1 = np.array(['a', 'a', 'a', 'b', 'b', 'b'])
k2 = np.array(['1', '2', '1', '2', '1', '2'])
grouped = s.groupby([k1, k2])
iterated = list(grouped)
expected = [('a', '1', s[[0, 2]]), ('a', '2', s[[1]]),
('b', '1', s[[4]]), ('b', '2', s[[3, 5]])]
for i, ((one, two), three) in enumerate(iterated):
e1, e2, e3 = expected[i]
self.assertEqual(e1, one)
self.assertEqual(e2, two)
assert_series_equal(three, e3)
def test_multi_iter_frame(self):
k1 = np.array(['b', 'b', 'b', 'a', 'a', 'a'])
k2 = np.array(['1', '2', '1', '2', '1', '2'])
df = DataFrame({'v1': np.random.randn(6),
'v2': np.random.randn(6),
'k1': k1, 'k2': k2},
index=['one', 'two', 'three', 'four', 'five', 'six'])
grouped = df.groupby(['k1', 'k2'])
# things get sorted!
iterated = list(grouped)
idx = df.index
expected = [('a', '1', df.ix[idx[[4]]]),
('a', '2', df.ix[idx[[3, 5]]]),
('b', '1', df.ix[idx[[0, 2]]]),
('b', '2', df.ix[idx[[1]]])]
for i, ((one, two), three) in enumerate(iterated):
e1, e2, e3 = expected[i]
self.assertEqual(e1, one)
self.assertEqual(e2, two)
assert_frame_equal(three, e3)
# don't iterate through groups with no data
df['k1'] = np.array(['b', 'b', 'b', 'a', 'a', 'a'])
df['k2'] = np.array(['1', '1', '1', '2', '2', '2'])
grouped = df.groupby(['k1', 'k2'])
groups = {}
for key, gp in grouped:
groups[key] = gp
self.assertEqual(len(groups), 2)
# axis = 1
three_levels = self.three_group.groupby(['A', 'B', 'C']).mean()
grouped = three_levels.T.groupby(axis=1, level=(1, 2))
for key, group in grouped:
pass
def test_multi_iter_panel(self):
wp = tm.makePanel()
grouped = wp.groupby([lambda x: x.month, lambda x: x.weekday()],
axis=1)
for (month, wd), group in grouped:
exp_axis = [x
for x in wp.major_axis
if x.month == month and x.weekday() == wd]
expected = wp.reindex(major=exp_axis)
assert_panel_equal(group, expected)
def test_multi_func(self):
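        # grouping by callables that look up the key columns should match
        # grouping by the columns themselves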
col1 = self.df['A']
col2 = self.df['B']
grouped = self.df.groupby([col1.get, col2.get])
agged = grouped.mean()
expected = self.df.groupby(['A', 'B']).mean()
assert_frame_equal(agged.ix[:, ['C', 'D']], expected.ix[:, ['C', 'D']],
check_names=False) # TODO groupby get drops names
# some "groups" with no data
df = DataFrame({'v1': np.random.randn(6),
'v2': np.random.randn(6),
'k1': np.array(['b', 'b', 'b', 'a', 'a', 'a']),
'k2': np.array(['1', '1', '1', '2', '2', '2'])},
index=['one', 'two', 'three', 'four', 'five', 'six'])
# only verify that it works for now
grouped = df.groupby(['k1', 'k2'])
grouped.agg(np.sum)
def test_multi_key_multiple_functions(self):
grouped = self.df.groupby(['A', 'B'])['C']
agged = grouped.agg([np.mean, np.std])
expected = DataFrame({'mean': grouped.agg(np.mean),
'std': grouped.agg(np.std)})
assert_frame_equal(agged, expected)
def test_frame_multi_key_function_list(self):
data = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
grouped = data.groupby(['A', 'B'])
funcs = [np.mean, np.std]
agged = grouped.agg(funcs)
expected = concat([grouped['D'].agg(funcs), grouped['E'].agg(funcs),
grouped['F'].agg(funcs)],
keys=['D', 'E', 'F'], axis=1)
assert (isinstance(agged.index, MultiIndex))
assert (isinstance(expected.index, MultiIndex))
assert_frame_equal(agged, expected)
def test_groupby_multiple_columns(self):
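        # aggregating over two keys should match a nested groupby over each
        # key in turn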
data = self.df
grouped = data.groupby(['A', 'B'])
def _check_op(op):
result1 = op(grouped)
expected = defaultdict(dict)
for n1, gp1 in data.groupby('A'):
for n2, gp2 in gp1.groupby('B'):
expected[n1][n2] = op(gp2.ix[:, ['C', 'D']])
expected = dict((k, DataFrame(v))
for k, v in compat.iteritems(expected))
expected = Panel.fromDict(expected).swapaxes(0, 1)
expected.major_axis.name, expected.minor_axis.name = 'A', 'B'
# a little bit crude
for col in ['C', 'D']:
result_col = op(grouped[col])
exp = expected[col]
pivoted = result1[col].unstack()
pivoted2 = result_col.unstack()
assert_frame_equal(pivoted.reindex_like(exp), exp)
assert_frame_equal(pivoted2.reindex_like(exp), exp)
_check_op(lambda x: x.sum())
_check_op(lambda x: x.mean())
# test single series works the same
result = data['C'].groupby([data['A'], data['B']]).mean()
expected = data.groupby(['A', 'B']).mean()['C']
assert_series_equal(result, expected)
def test_groupby_as_index_agg(self):
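        # with as_index=False the group keys should come back as regular
        # columns rather than as the index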
grouped = self.df.groupby('A', as_index=False)
# single-key
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
result2 = grouped.agg(OrderedDict([['C', np.mean], ['D', np.sum]]))
expected2 = grouped.mean()
expected2['D'] = grouped.sum()['D']
assert_frame_equal(result2, expected2)
grouped = self.df.groupby('A', as_index=True)
expected3 = grouped['C'].sum()
expected3 = DataFrame(expected3).rename(columns={'C': 'Q'})
result3 = grouped['C'].agg({'Q': np.sum})
assert_frame_equal(result3, expected3)
# multi-key
grouped = self.df.groupby(['A', 'B'], as_index=False)
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
result2 = grouped.agg(OrderedDict([['C', np.mean], ['D', np.sum]]))
expected2 = grouped.mean()
expected2['D'] = grouped.sum()['D']
assert_frame_equal(result2, expected2)
expected3 = grouped['C'].sum()
expected3 = DataFrame(expected3).rename(columns={'C': 'Q'})
result3 = grouped['C'].agg({'Q': np.sum})
assert_frame_equal(result3, expected3)
# GH7115 & GH8112 & GH8582
df = DataFrame(np.random.randint(0, 100, (50, 3)),
columns=['jim', 'joe', 'jolie'])
ts = Series(np.random.randint(5, 10, 50), name='jim')
gr = df.groupby(ts)
gr.nth(0) # invokes set_selection_from_grouper internally
assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))
for attr in ['mean', 'max', 'count', 'idxmax', 'cumsum', 'all']:
gr = df.groupby(ts, as_index=False)
left = getattr(gr, attr)()
gr = df.groupby(ts.values, as_index=True)
right = getattr(gr, attr)().reset_index(drop=True)
assert_frame_equal(left, right)
def test_series_groupby_nunique(self):
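        # SeriesGroupBy.nunique should match applying Series.nunique per group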
from itertools import product
from string import ascii_lowercase
def check_nunique(df, keys):
for sort, dropna in product((False, True), repeat=2):
gr = df.groupby(keys, sort=sort)
left = gr['julie'].nunique(dropna=dropna)
gr = df.groupby(keys, sort=sort)
right = gr['julie'].apply(Series.nunique, dropna=dropna)
assert_series_equal(left, right)
days = date_range('2015-08-23', periods=10)
for n, m in product(10 ** np.arange(2, 6), (10, 100, 1000)):
frame = DataFrame({
'jim': np.random.choice(
list(ascii_lowercase), n),
'joe': np.random.choice(days, n),
'julie': np.random.randint(0, m, n)
})
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
frame.loc[1::17, 'jim'] = None
frame.loc[3::37, 'joe'] = None
frame.loc[7::19, 'julie'] = None
frame.loc[8::19, 'julie'] = None
frame.loc[9::19, 'julie'] = None
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
def test_series_groupby_value_counts(self):
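        # SeriesGroupBy.value_counts should match applying Series.value_counts
        # per group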
from itertools import product
def rebuild_index(df):
arr = list(map(df.index.get_level_values, range(df.index.nlevels)))
df.index = MultiIndex.from_arrays(arr, names=df.index.names)
return df
def check_value_counts(df, keys, bins):
for isort, normalize, sort, ascending, dropna \
in product((False, True), repeat=5):
kwargs = dict(normalize=normalize, sort=sort,
ascending=ascending, dropna=dropna, bins=bins)
gr = df.groupby(keys, sort=isort)
left = gr['3rd'].value_counts(**kwargs)
gr = df.groupby(keys, sort=isort)
right = gr['3rd'].apply(Series.value_counts, **kwargs)
right.index.names = right.index.names[:-1] + ['3rd']
# have to sort on index because of unstable sort on values
left, right = map(rebuild_index, (left, right)) # xref GH9212
assert_series_equal(left.sort_index(), right.sort_index())
def loop(df):
bins = None, np.arange(0, max(5, df['3rd'].max()) + 1, 2)
keys = '1st', '2nd', ('1st', '2nd')
for k, b in product(keys, bins):
check_value_counts(df, k, b)
days = date_range('2015-08-24', periods=10)
for n, m in product((100, 1000), (5, 20)):
frame = DataFrame({
'1st': np.random.choice(
list('abcd'), n),
'2nd': np.random.choice(days, n),
'3rd': np.random.randint(1, m + 1, n)
})
loop(frame)
frame.loc[1::11, '1st'] = nan
frame.loc[3::17, '2nd'] = nan
frame.loc[7::19, '3rd'] = nan
frame.loc[8::19, '3rd'] = nan
frame.loc[9::19, '3rd'] = nan
loop(frame)
def test_mulitindex_passthru(self):
# GH 7997
# regression from 0.14.1
df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
df.columns = pd.MultiIndex.from_tuples([(0, 1), (1, 1), (2, 1)])
result = df.groupby(axis=1, level=[0, 1]).first()
assert_frame_equal(result, df)
def test_multifunc_select_col_integer_cols(self):
df = self.df
df.columns = np.arange(len(df.columns))
# it works!
df.groupby(1, as_index=False)[2].agg({'Q': np.mean})
def test_as_index_series_return_frame(self):
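        # selecting a single column with as_index=False should still return a
        # DataFrame that includes the group keys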
grouped = self.df.groupby('A', as_index=False)
grouped2 = self.df.groupby(['A', 'B'], as_index=False)
result = grouped['C'].agg(np.sum)
expected = grouped.agg(np.sum).ix[:, ['A', 'C']]
tm.assertIsInstance(result, DataFrame)
assert_frame_equal(result, expected)
result2 = grouped2['C'].agg(np.sum)
expected2 = grouped2.agg(np.sum).ix[:, ['A', 'B', 'C']]
tm.assertIsInstance(result2, DataFrame)
assert_frame_equal(result2, expected2)
result = grouped['C'].sum()
expected = grouped.sum().ix[:, ['A', 'C']]
tm.assertIsInstance(result, DataFrame)
assert_frame_equal(result, expected)
result2 = grouped2['C'].sum()
expected2 = grouped2.sum().ix[:, ['A', 'B', 'C']]
tm.assertIsInstance(result2, DataFrame)
assert_frame_equal(result2, expected2)
# corner case
self.assertRaises(Exception, grouped['C'].__getitem__, 'D')
def test_groupby_as_index_cython(self):
data = self.df
# single-key
grouped = data.groupby('A', as_index=False)
result = grouped.mean()
expected = data.groupby(['A']).mean()
expected.insert(0, 'A', expected.index)
expected.index = np.arange(len(expected))
assert_frame_equal(result, expected)
# multi-key
grouped = data.groupby(['A', 'B'], as_index=False)
result = grouped.mean()
expected = data.groupby(['A', 'B']).mean()
arrays = lzip(*expected.index._tuple_index)
expected.insert(0, 'A', arrays[0])
expected.insert(1, 'B', arrays[1])
expected.index = np.arange(len(expected))
assert_frame_equal(result, expected)
def test_groupby_as_index_series_scalar(self):
grouped = self.df.groupby(['A', 'B'], as_index=False)
# GH #421
result = grouped['C'].agg(len)
expected = grouped.agg(len).ix[:, ['A', 'B', 'C']]
assert_frame_equal(result, expected)
def test_groupby_as_index_corner(self):
self.assertRaises(TypeError, self.ts.groupby, lambda x: x.weekday(),
as_index=False)
self.assertRaises(ValueError, self.df.groupby, lambda x: x.lower(),
as_index=False, axis=1)
def test_groupby_as_index_apply(self):
# GH #4648 and #3417
df = DataFrame({'item_id': ['b', 'b', 'a', 'c', 'a', 'b'],
'user_id': [1, 2, 1, 1, 3, 1],
'time': range(6)})
g_as = df.groupby('user_id', as_index=True)
g_not_as = df.groupby('user_id', as_index=False)
res_as = g_as.head(2).index
res_not_as = g_not_as.head(2).index
exp = Index([0, 1, 2, 4])
assert_index_equal(res_as, exp)
assert_index_equal(res_not_as, exp)
res_as_apply = g_as.apply(lambda x: x.head(2)).index
res_not_as_apply = g_not_as.apply(lambda x: x.head(2)).index
        # apply doesn't maintain the original ordering
        # changed in GH5610: with as_index=False a MultiIndex is returned here
exp_not_as_apply = MultiIndex.from_tuples([(0, 0), (0, 2), (1, 1), (
2, 4)])
tp = [(1, 0), (1, 2), (2, 1), (3, 4)]
exp_as_apply = MultiIndex.from_tuples(tp, names=['user_id', None])
assert_index_equal(res_as_apply, exp_as_apply)
assert_index_equal(res_not_as_apply, exp_not_as_apply)
ind = Index(list('abcde'))
df = DataFrame([[1, 2], [2, 3], [1, 4], [1, 5], [2, 6]], index=ind)
res = df.groupby(0, as_index=False).apply(lambda x: x).index
assert_index_equal(res, ind)
def test_groupby_head_tail(self):
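        # head/tail slice each group and keep the original row index,
        # with or without as_index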
df = DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B'])
g_as = df.groupby('A', as_index=True)
g_not_as = df.groupby('A', as_index=False)
        # as_index=False, much easier
assert_frame_equal(df.loc[[0, 2]], g_not_as.head(1))
assert_frame_equal(df.loc[[1, 2]], g_not_as.tail(1))
empty_not_as = DataFrame(columns=df.columns, index=pd.Index(
[], dtype=df.index.dtype))
empty_not_as['A'] = empty_not_as['A'].astype(df.A.dtype)
empty_not_as['B'] = empty_not_as['B'].astype(df.B.dtype)
assert_frame_equal(empty_not_as, g_not_as.head(0))
assert_frame_equal(empty_not_as, g_not_as.tail(0))
assert_frame_equal(empty_not_as, g_not_as.head(-1))
assert_frame_equal(empty_not_as, g_not_as.tail(-1))
assert_frame_equal(df, g_not_as.head(7)) # contains all
assert_frame_equal(df, g_not_as.tail(7))
# as_index=True, (used to be different)
df_as = df
assert_frame_equal(df_as.loc[[0, 2]], g_as.head(1))
assert_frame_equal(df_as.loc[[1, 2]], g_as.tail(1))
empty_as = DataFrame(index=df_as.index[:0], columns=df.columns)
empty_as['A'] = empty_not_as['A'].astype(df.A.dtype)
empty_as['B'] = empty_not_as['B'].astype(df.B.dtype)
assert_frame_equal(empty_as, g_as.head(0))
assert_frame_equal(empty_as, g_as.tail(0))
assert_frame_equal(empty_as, g_as.head(-1))
assert_frame_equal(empty_as, g_as.tail(-1))
assert_frame_equal(df_as, g_as.head(7)) # contains all
assert_frame_equal(df_as, g_as.tail(7))
# test with selection
assert_frame_equal(g_as[[]].head(1), df_as.loc[[0, 2], []])
assert_frame_equal(g_as[['A']].head(1), df_as.loc[[0, 2], ['A']])
assert_frame_equal(g_as[['B']].head(1), df_as.loc[[0, 2], ['B']])
assert_frame_equal(g_as[['A', 'B']].head(1), df_as.loc[[0, 2]])
assert_frame_equal(g_not_as[[]].head(1), df_as.loc[[0, 2], []])
assert_frame_equal(g_not_as[['A']].head(1), df_as.loc[[0, 2], ['A']])
assert_frame_equal(g_not_as[['B']].head(1), df_as.loc[[0, 2], ['B']])
assert_frame_equal(g_not_as[['A', 'B']].head(1), df_as.loc[[0, 2]])
def test_groupby_multiple_key(self):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day])
agged = grouped.sum()
assert_almost_equal(df.values, agged.values)
grouped = df.T.groupby([lambda x: x.year,
lambda x: x.month,
lambda x: x.day], axis=1)
agged = grouped.agg(lambda x: x.sum())
self.assertTrue(agged.index.equals(df.columns))
assert_almost_equal(df.T.values, agged.values)
agged = grouped.agg(lambda x: x.sum())
assert_almost_equal(df.T.values, agged.values)
def test_groupby_multi_corner(self):
# test that having an all-NA column doesn't mess you up
df = self.df.copy()
df['bad'] = np.nan
agged = df.groupby(['A', 'B']).mean()
expected = self.df.groupby(['A', 'B']).mean()
expected['bad'] = np.nan
assert_frame_equal(agged, expected)
def test_omit_nuisance(self):
grouped = self.df.groupby('A')
result = grouped.mean()
expected = self.df.ix[:, ['A', 'C', 'D']].groupby('A').mean()
assert_frame_equal(result, expected)
agged = grouped.agg(np.mean)
exp = grouped.mean()
assert_frame_equal(agged, exp)
df = self.df.ix[:, ['A', 'C', 'D']]
df['E'] = datetime.now()
grouped = df.groupby('A')
result = grouped.agg(np.sum)
expected = grouped.sum()
assert_frame_equal(result, expected)
# won't work with axis = 1
grouped = df.groupby({'A': 0, 'C': 0, 'D': 1, 'E': 1}, axis=1)
        self.assertRaises(TypeError, grouped.agg,
                          lambda x: x.sum(0, numeric_only=False))
def test_omit_nuisance_python_multiple(self):
grouped = self.three_group.groupby(['A', 'B'])
agged = grouped.agg(np.mean)
exp = grouped.mean()
assert_frame_equal(agged, exp)
def test_empty_groups_corner(self):
# handle empty groups
df = DataFrame({'k1': np.array(['b', 'b', 'b', 'a', 'a', 'a']),
'k2': np.array(['1', '1', '1', '2', '2', '2']),
'k3': ['foo', 'bar'] * 3,
'v1': np.random.randn(6),
'v2': np.random.randn(6)})
grouped = df.groupby(['k1', 'k2'])
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
grouped = self.mframe[3:5].groupby(level=0)
agged = grouped.apply(lambda x: x.mean())
agged_A = grouped['A'].apply(np.mean)
assert_series_equal(agged['A'], agged_A)
self.assertEqual(agged.index.name, 'first')
def test_apply_concat_preserve_names(self):
grouped = self.three_group.groupby(['A', 'B'])
def desc(group):
result = group.describe()
result.index.name = 'stat'
return result
def desc2(group):
result = group.describe()
result.index.name = 'stat'
result = result[:len(group)]
# weirdo
return result
def desc3(group):
result = group.describe()
# names are different
result.index.name = 'stat_%d' % len(group)
result = result[:len(group)]
# weirdo
return result
result = grouped.apply(desc)
self.assertEqual(result.index.names, ('A', 'B', 'stat'))
result2 = grouped.apply(desc2)
self.assertEqual(result2.index.names, ('A', 'B', 'stat'))
result3 = grouped.apply(desc3)
self.assertEqual(result3.index.names, ('A', 'B', None))
def test_nonsense_func(self):
df = DataFrame([0])
self.assertRaises(Exception, df.groupby, lambda x: x + 'foo')
def test_builtins_apply(self): # GH8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)),
columns=['jim', 'joe'])
df['jolie'] = np.random.randn(1000)
for keys in ['jim', ['jim', 'joe']]: # single key & multi-key
if keys == 'jim':
continue
for f in [max, min, sum]:
fname = f.__name__
result = df.groupby(keys).apply(f)
result.shape
ngroups = len(df.drop_duplicates(subset=keys))
assert result.shape == (ngroups, 3), 'invalid frame shape: '\
'{} (expected ({}, 3))'.format(result.shape, ngroups)
assert_frame_equal(result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)))
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
assert_frame_equal(result, expected, check_dtype=False)
assert_series_equal(getattr(result, fname)(),
getattr(df, fname)())
def test_cythonized_aggers(self):
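        # compare the cythonized reductions against naive per-group
        # computations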
data = {'A': [0, 0, 0, 0, 1, 1, 1, 1, 1, 1., nan, nan],
'B': ['A', 'B'] * 6,
'C': np.random.randn(12)}
df = DataFrame(data)
df.loc[2:10:2, 'C'] = nan
def _testit(name):
op = lambda x: getattr(x, name)()
# single column
grouped = df.drop(['B'], axis=1).groupby('A')
exp = {}
for cat, group in grouped:
exp[cat] = op(group['C'])
exp = DataFrame({'C': exp})
exp.index.name = 'A'
result = op(grouped)
assert_frame_equal(result, exp)
# multiple columns
grouped = df.groupby(['A', 'B'])
expd = {}
for (cat1, cat2), group in grouped:
expd.setdefault(cat1, {})[cat2] = op(group['C'])
exp = DataFrame(expd).T.stack(dropna=False)
exp.index.names = ['A', 'B']
exp.name = 'C'
result = op(grouped)['C']
if not tm._incompat_bottleneck_version(name):
assert_series_equal(result, exp)
_testit('count')
_testit('sum')
_testit('std')
_testit('var')
_testit('sem')
_testit('mean')
_testit('median')
_testit('prod')
_testit('min')
_testit('max')
def test_max_min_non_numeric(self):
# #2700
aa = DataFrame({'nn': [11, 11, 22, 22],
'ii': [1, 2, 3, 4],
'ss': 4 * ['mama']})
result = aa.groupby('nn').max()
self.assertTrue('ss' in result)
result = aa.groupby('nn').min()
self.assertTrue('ss' in result)
def test_cython_agg_boolean(self):
frame = DataFrame({'a': np.random.randint(0, 5, 50),
'b': np.random.randint(0, 2, 50).astype('bool')})
result = frame.groupby('a')['b'].mean()
expected = frame.groupby('a')['b'].agg(np.mean)
assert_series_equal(result, expected)
def test_cython_agg_nothing_to_agg(self):
frame = DataFrame({'a': np.random.randint(0, 5, 50),
'b': ['foo', 'bar'] * 25})
self.assertRaises(DataError, frame.groupby('a')['b'].mean)
frame = DataFrame({'a': np.random.randint(0, 5, 50),
'b': ['foo', 'bar'] * 25})
self.assertRaises(DataError, frame[['b']].groupby(frame['a']).mean)
def test_cython_agg_nothing_to_agg_with_dates(self):
frame = DataFrame({'a': np.random.randint(0, 5, 50),
'b': ['foo', 'bar'] * 25,
'dates': pd.date_range('now', periods=50,
freq='T')})
with tm.assertRaisesRegexp(DataError, "No numeric types to aggregate"):
frame.groupby('b').dates.mean()
def test_groupby_timedelta_cython_count(self):
df = DataFrame({'g': list('ab' * 2),
'delt': np.arange(4).astype('timedelta64[ns]')})
expected = Series([
2, 2
], index=pd.Index(['a', 'b'], name='g'), name='delt')
result = df.groupby('g').delt.count()
tm.assert_series_equal(expected, result)
def test_cython_agg_frame_columns(self):
# #2113
df = DataFrame({'x': [1, 2, 3], 'y': [3, 4, 5]})
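        # repeated calls should keep working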
df.groupby(level=0, axis='columns').mean()
df.groupby(level=0, axis='columns').mean()
df.groupby(level=0, axis='columns').mean()
df.groupby(level=0, axis='columns').mean()
def test_wrap_aggregated_output_multindex(self):
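        # aggregated output should keep the MultiIndex columns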
df = self.mframe.T
df['baz', 'two'] = 'peekaboo'
keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]
agged = df.groupby(keys).agg(np.mean)
tm.assertIsInstance(agged.columns, MultiIndex)
def aggfun(ser):
if ser.name == ('foo', 'one'):
raise TypeError
else:
return ser.sum()
agged2 = df.groupby(keys).aggregate(aggfun)
self.assertEqual(len(agged2.columns) + 1, len(df.columns))
def test_groupby_level(self):
frame = self.mframe
deleveled = frame.reset_index()
result0 = frame.groupby(level=0).sum()
result1 = frame.groupby(level=1).sum()
expected0 = frame.groupby(deleveled['first'].values).sum()
expected1 = frame.groupby(deleveled['second'].values).sum()
expected0 = expected0.reindex(frame.index.levels[0])
expected1 = expected1.reindex(frame.index.levels[1])
self.assertEqual(result0.index.name, 'first')
self.assertEqual(result1.index.name, 'second')
assert_frame_equal(result0, expected0)
assert_frame_equal(result1, expected1)
self.assertEqual(result0.index.name, frame.index.names[0])
self.assertEqual(result1.index.name, frame.index.names[1])
# groupby level name
result0 = frame.groupby(level='first').sum()
result1 = frame.groupby(level='second').sum()
assert_frame_equal(result0, expected0)
assert_frame_equal(result1, expected1)
# axis=1
result0 = frame.T.groupby(level=0, axis=1).sum()
result1 = frame.T.groupby(level=1, axis=1).sum()
assert_frame_equal(result0, expected0.T)
assert_frame_equal(result1, expected1.T)
# raise exception for non-MultiIndex
self.assertRaises(ValueError, self.df.groupby, level=1)
def test_groupby_level_index_names(self):
# GH4014 this used to raise ValueError since 'exp'>1 (in py2)
df = DataFrame({'exp': ['A'] * 3 + ['B'] * 3,
'var1': lrange(6), }).set_index('exp')
df.groupby(level='exp')
self.assertRaises(ValueError, df.groupby, level='foo')
def test_groupby_level_with_nas(self):
index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],
labels=[[1, 1, 1, 1, 0, 0, 0, 0], [0, 1, 2, 3, 0, 1,
2, 3]])
# factorizing doesn't confuse things
s = Series(np.arange(8.), index=index)
result = s.groupby(level=0).sum()
expected = Series([22., 6.], index=[1, 0])
assert_series_equal(result, expected)
index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],
labels=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0,
1, 2, 3]])
# factorizing doesn't confuse things
s = Series(np.arange(8.), index=index)
result = s.groupby(level=0).sum()
expected = Series([18., 6.], index=[1, 0])
assert_series_equal(result, expected)
def test_groupby_level_apply(self):
frame = self.mframe
result = frame.groupby(level=0).count()
self.assertEqual(result.index.name, 'first')
result = frame.groupby(level=1).count()
self.assertEqual(result.index.name, 'second')
result = frame['A'].groupby(level=0).count()
self.assertEqual(result.index.name, 'first')
def test_groupby_args(self):
# PR8618 and issue 8015
frame = self.mframe
def j():
frame.groupby()
self.assertRaisesRegexp(TypeError,
"You have to supply one of 'by' and 'level'",
j)
def k():
frame.groupby(by=None, level=None)
self.assertRaisesRegexp(TypeError,
"You have to supply one of 'by' and 'level'",
k)
def test_groupby_level_mapper(self):
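        # a dict mapper combined with level= should group the level values
        # through the mapper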
frame = self.mframe
deleveled = frame.reset_index()
mapper0 = {'foo': 0, 'bar': 0, 'baz': 1, 'qux': 1}
mapper1 = {'one': 0, 'two': 0, 'three': 1}
result0 = frame.groupby(mapper0, level=0).sum()
result1 = frame.groupby(mapper1, level=1).sum()
mapped_level0 = np.array([mapper0.get(x) for x in deleveled['first']])
mapped_level1 = np.array([mapper1.get(x) for x in deleveled['second']])
expected0 = frame.groupby(mapped_level0).sum()
expected1 = frame.groupby(mapped_level1).sum()
expected0.index.name, expected1.index.name = 'first', 'second'
assert_frame_equal(result0, expected0)
assert_frame_equal(result1, expected1)
def test_groupby_level_0_nonmulti(self):
# #1313
a = Series([1, 2, 3, 10, 4, 5, 20, 6], Index([1, 2, 3, 1,
4, 5, 2, 6], name='foo'))
result = a.groupby(level=0).sum()
self.assertEqual(result.index.name, a.index.name)
def test_groupby_complex(self):
# GH 12902
a = Series(data=np.arange(4) * (1 + 2j), index=[0, 0, 1, 1])
expected = Series((1 + 2j, 5 + 10j))
result = a.groupby(level=0).sum()
assert_series_equal(result, expected)
result = a.sum(level=0)
assert_series_equal(result, expected)
def test_level_preserve_order(self):
grouped = self.mframe.groupby(level=0)
exp_labels = np.array([0, 0, 0, 1, 1, 2, 2, 3, 3, 3])
assert_almost_equal(grouped.grouper.labels[0], exp_labels)
def test_grouping_labels(self):
grouped = self.mframe.groupby(self.mframe.index.get_level_values(0))
exp_labels = np.array([2, 2, 2, 0, 0, 1, 1, 3, 3, 3])
assert_almost_equal(grouped.grouper.labels[0], exp_labels)
def test_cython_fail_agg(self):
dr = bdate_range('1/1/2000', periods=50)
ts = Series(['A', 'B', 'C', 'D', 'E'] * 10, index=dr)
grouped = ts.groupby(lambda x: x.month)
summed = grouped.sum()
expected = grouped.agg(np.sum)
assert_series_equal(summed, expected)
def test_apply_series_to_frame(self):
def f(piece):
return DataFrame({'value': piece,
'demeaned': piece - piece.mean(),
'logged': np.log(piece)})
dr = bdate_range('1/1/2000', periods=100)
ts = Series(np.random.randn(100), index=dr)
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(f)
tm.assertIsInstance(result, DataFrame)
self.assertTrue(result.index.equals(ts.index))
def test_apply_series_yield_constant(self):
result = self.df.groupby(['A', 'B'])['C'].apply(len)
self.assertEqual(result.index.names[:2], ('A', 'B'))
def test_apply_frame_to_series(self):
grouped = self.df.groupby(['A', 'B'])
result = grouped.apply(len)
expected = grouped.count()['C']
self.assertTrue(result.index.equals(expected.index))
self.assert_numpy_array_equal(result.values, expected.values)
def test_apply_frame_concat_series(self):
def trans(group):
return group.groupby('B')['C'].sum().sort_values()[:2]
def trans2(group):
grouped = group.groupby(df.reindex(group.index)['B'])
return grouped.sum().sort_values()[:2]
df = DataFrame({'A': np.random.randint(0, 5, 1000),
'B': np.random.randint(0, 5, 1000),
'C': np.random.randn(1000)})
result = df.groupby('A').apply(trans)
exp = df.groupby('A')['C'].apply(trans2)
assert_series_equal(result, exp, check_names=False)
self.assertEqual(result.name, 'C')
def test_apply_transform(self):
grouped = self.ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
assert_series_equal(result, expected)
def test_apply_multikey_corner(self):
grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
def f(group):
return group.sort_values('A')[-5:]
result = grouped.apply(f)
for key, group in grouped:
assert_frame_equal(result.ix[key], f(group))
def test_mutate_groups(self):
# GH3380
mydf = DataFrame({
'cat1': ['a'] * 8 + ['b'] * 6,
'cat2': ['c'] * 2 + ['d'] * 2 + ['e'] * 2 + ['f'] * 2 + ['c'] * 2 +
['d'] * 2 + ['e'] * 2,
'cat3': lmap(lambda x: 'g%s' % x, lrange(1, 15)),
'val': np.random.randint(100, size=14),
})
def f_copy(x):
x = x.copy()
x['rank'] = x.val.rank(method='min')
return x.groupby('cat2')['rank'].min()
def f_no_copy(x):
x['rank'] = x.val.rank(method='min')
return x.groupby('cat2')['rank'].min()
grpby_copy = mydf.groupby('cat1').apply(f_copy)
grpby_no_copy = mydf.groupby('cat1').apply(f_no_copy)
assert_series_equal(grpby_copy, grpby_no_copy)
def test_no_mutate_but_looks_like(self):
# GH 8467
        # the first shows the mutation indicator
        # the second does not, but should yield the same results
df = DataFrame({'key': [1, 1, 1, 2, 2, 2, 3, 3, 3], 'value': range(9)})
result1 = df.groupby('key', group_keys=True).apply(lambda x: x[:].key)
result2 = df.groupby('key', group_keys=True).apply(lambda x: x.key)
assert_series_equal(result1, result2)
def test_apply_chunk_view(self):
        # low-level tinkering could be unsafe, make sure it is not
df = DataFrame({'key': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'value': lrange(9)})
# return view
f = lambda x: x[:2]
result = df.groupby('key', group_keys=False).apply(f)
expected = df.take([0, 1, 3, 4, 6, 7])
assert_frame_equal(result, expected)
def test_apply_no_name_column_conflict(self):
df = DataFrame({'name': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
'name2': [0, 0, 0, 1, 1, 1, 0, 0, 1, 1],
'value': lrange(10)[::-1]})
# it works! #2605
grouped = df.groupby(['name', 'name2'])
grouped.apply(lambda x: x.sort_values('value', inplace=True))
def test_groupby_series_indexed_differently(self):
s1 = Series([5.0, -9.0, 4.0, 100., -5., 55., 6.7],
index=Index(['a', 'b', 'c', 'd', 'e', 'f', 'g']))
s2 = Series([1.0, 1.0, 4.0, 5.0, 5.0, 7.0],
index=Index(['a', 'b', 'd', 'f', 'g', 'h']))
grouped = s1.groupby(s2)
agged = grouped.mean()
exp = s1.groupby(s2.reindex(s1.index).get).mean()
assert_series_equal(agged, exp)
def test_groupby_with_hier_columns(self):
tuples = list(zip(*[['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux',
'qux'], ['one', 'two', 'one', 'two', 'one', 'two',
'one', 'two']]))
index = MultiIndex.from_tuples(tuples)
columns = MultiIndex.from_tuples([('A', 'cat'), ('B', 'dog'), (
'B', 'cat'), ('A', 'dog')])
df = DataFrame(np.random.randn(8, 4), index=index, columns=columns)
result = df.groupby(level=0).mean()
self.assertTrue(result.columns.equals(columns))
result = df.groupby(level=0, axis=1).mean()
self.assertTrue(result.index.equals(df.index))
result = df.groupby(level=0).agg(np.mean)
self.assertTrue(result.columns.equals(columns))
result = df.groupby(level=0).apply(lambda x: x.mean())
self.assertTrue(result.columns.equals(columns))
result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))
self.assertTrue(result.columns.equals(Index(['A', 'B'])))
self.assertTrue(result.index.equals(df.index))
# add a nuisance column
sorted_columns, _ = columns.sortlevel(0)
df['A', 'foo'] = 'bar'
result = df.groupby(level=0).mean()
self.assertTrue(result.columns.equals(df.columns[:-1]))
def test_pass_args_kwargs(self):
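        # extra positional and keyword arguments should be forwarded to the
        # function by agg/apply/transform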
from numpy import percentile
def f(x, q=None, axis=0):
return percentile(x, q, axis=axis)
g = lambda x: percentile(x, 80, axis=0)
# Series
ts_grouped = self.ts.groupby(lambda x: x.month)
agg_result = ts_grouped.agg(percentile, 80, axis=0)
apply_result = ts_grouped.apply(percentile, 80, axis=0)
trans_result = ts_grouped.transform(percentile, 80, axis=0)
agg_expected = ts_grouped.quantile(.8)
trans_expected = ts_grouped.transform(g)
assert_series_equal(apply_result, agg_expected)
assert_series_equal(agg_result, agg_expected)
assert_series_equal(trans_result, trans_expected)
agg_result = ts_grouped.agg(f, q=80)
apply_result = ts_grouped.apply(f, q=80)
trans_result = ts_grouped.transform(f, q=80)
assert_series_equal(agg_result, agg_expected)
assert_series_equal(apply_result, agg_expected)
assert_series_equal(trans_result, trans_expected)
# DataFrame
df_grouped = self.tsframe.groupby(lambda x: x.month)
agg_result = df_grouped.agg(percentile, 80, axis=0)
apply_result = df_grouped.apply(DataFrame.quantile, .8)
expected = df_grouped.quantile(.8)
assert_frame_equal(apply_result, expected)
assert_frame_equal(agg_result, expected)
agg_result = df_grouped.agg(f, q=80)
apply_result = df_grouped.apply(DataFrame.quantile, q=.8)
assert_frame_equal(agg_result, expected)
assert_frame_equal(apply_result, expected)
def test_size(self):
grouped = self.df.groupby(['A', 'B'])
result = grouped.size()
for key, group in grouped:
self.assertEqual(result[key], len(group))
grouped = self.df.groupby('A')
result = grouped.size()
for key, group in grouped:
self.assertEqual(result[key], len(group))
grouped = self.df.groupby('B')
result = grouped.size()
for key, group in grouped:
self.assertEqual(result[key], len(group))
df = DataFrame(np.random.choice(20, (1000, 3)), columns=list('abc'))
for sort, key in cart_product((False, True), ('a', 'b', ['a', 'b'])):
left = df.groupby(key, sort=sort).size()
right = df.groupby(key, sort=sort)['c'].apply(lambda a: a.shape[0])
assert_series_equal(left, right, check_names=False)
# GH11699
df = DataFrame([], columns=['A', 'B'])
out = Series([], dtype='int64', index=Index([], name='A'))
assert_series_equal(df.groupby('A').size(), out)
def test_count(self):
from string import ascii_lowercase
n = 1 << 15
dr = date_range('2015-08-30', periods=n // 10, freq='T')
df = DataFrame({
'1st': np.random.choice(
list(ascii_lowercase), n),
'2nd': np.random.randint(0, 5, n),
'3rd': np.random.randn(n).round(3),
'4th': np.random.randint(-10, 10, n),
'5th': np.random.choice(dr, n),
'6th': np.random.randn(n).round(3),
'7th': np.random.randn(n).round(3),
'8th': np.random.choice(dr, n) - np.random.choice(dr, 1),
'9th': np.random.choice(
list(ascii_lowercase), n)
})
for col in df.columns.drop(['1st', '2nd', '4th']):
df.loc[np.random.choice(n, n // 10), col] = np.nan
df['9th'] = df['9th'].astype('category')
for key in '1st', '2nd', ['1st', '2nd']:
left = df.groupby(key).count()
right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1)
assert_frame_equal(left, right)
# GH5610
# count counts non-nulls
df = pd.DataFrame([[1, 2, 'foo'], [1, nan, 'bar'], [3, nan, nan]],
columns=['A', 'B', 'C'])
count_as = df.groupby('A').count()
count_not_as = df.groupby('A', as_index=False).count()
expected = DataFrame([[1, 2], [0, 0]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
assert_frame_equal(count_not_as, expected.reset_index())
assert_frame_equal(count_as, expected)
count_B = df.groupby('A')['B'].count()
assert_series_equal(count_B, expected['B'])
def test_count_object(self):
df = pd.DataFrame({'a': ['a'] * 3 + ['b'] * 3, 'c': [2] * 3 + [3] * 3})
result = df.groupby('c').a.count()
expected = pd.Series([
3, 3
], index=pd.Index([2, 3], name='c'), name='a')
tm.assert_series_equal(result, expected)
df = pd.DataFrame({'a': ['a', np.nan, np.nan] + ['b'] * 3,
'c': [2] * 3 + [3] * 3})
result = df.groupby('c').a.count()
expected = pd.Series([
1, 3
], index=pd.Index([2, 3], name='c'), name='a')
tm.assert_series_equal(result, expected)
def test_count_cross_type(self): # GH8169
vals = np.hstack((np.random.randint(0, 5, (100, 2)), np.random.randint(
0, 2, (100, 2))))
df = pd.DataFrame(vals, columns=['a', 'b', 'c', 'd'])
df[df == 2] = np.nan
expected = df.groupby(['c', 'd']).count()
for t in ['float32', 'object']:
df['a'] = df['a'].astype(t)
df['b'] = df['b'].astype(t)
result = df.groupby(['c', 'd']).count()
tm.assert_frame_equal(result, expected)
def test_non_cython_api(self):
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, 'foo'], [1,
nan,
'bar', ], [3, nan, 'baz']
], columns=['A', 'B', 'C'])
g = df.groupby('A')
gni = df.groupby('A', as_index=False)
# mad
expected = DataFrame([[0], [nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.mad()
assert_frame_equal(result, expected)
expected = DataFrame([[0., 0.], [0, nan]], columns=['A', 'B'],
index=[0, 1])
result = gni.mad()
assert_frame_equal(result, expected)
# describe
expected = DataFrame(dict(B=concat(
[df.loc[[0, 1], 'B'].describe(), df.loc[[2], 'B'].describe()],
keys=[1, 3])))
expected.index.names = ['A', None]
result = g.describe()
assert_frame_equal(result, expected)
expected = concat(
[df.loc[[0, 1], ['A', 'B']].describe(),
df.loc[[2], ['A', 'B']].describe()], keys=[0, 1])
result = gni.describe()
assert_frame_equal(result, expected)
# any
expected = DataFrame([[True, True], [False, True]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
result = g.any()
assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0], [nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.idxmax()
assert_frame_equal(result, expected)
def test_cython_api2(self):
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame(
[[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]
], columns=['A', 'B', 'C'])
expected = DataFrame(
[[2, np.nan], [np.nan, 9], [4, 9]], columns=['B', 'C'])
result = df.groupby('A').cumsum()
assert_frame_equal(result, expected)
# GH 5755 - cumsum is a transformer and should ignore as_index
result = df.groupby('A', as_index=False).cumsum()
assert_frame_equal(result, expected)
def test_grouping_ndarray(self):
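        # grouping by a raw ndarray of values should match grouping by the
        # column itself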
grouped = self.df.groupby(self.df['A'].values)
result = grouped.sum()
expected = self.df.groupby('A').sum()
        # Note: no names when grouping by value
        assert_frame_equal(result, expected, check_names=False)
def test_agg_consistency(self):
# agg with ([]) and () not consistent
# GH 6715
def P1(a):
try:
return np.percentile(a.dropna(), q=1)
except:
return np.nan
import datetime as dt
df = DataFrame({'col1': [1, 2, 3, 4],
'col2': [10, 25, 26, 31],
'date': [dt.date(2013, 2, 10), dt.date(2013, 2, 10),
dt.date(2013, 2, 11), dt.date(2013, 2, 11)]})
g = df.groupby('date')
expected = g.agg([P1])
expected.columns = expected.columns.levels[0]
result = g.agg(P1)
assert_frame_equal(result, expected)
def test_apply_typecast_fail(self):
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(
['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)})
def f(group):
v = group['v']
group['v2'] = (v - v.min()) / (v.max() - v.min())
return group
result = df.groupby('d').apply(f)
expected = df.copy()
expected['v2'] = np.tile([0., 0.5, 1], 2)
assert_frame_equal(result, expected)
def test_apply_multiindex_fail(self):
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]
])
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)}, index=index)
def f(group):
v = group['v']
group['v2'] = (v - v.min()) / (v.max() - v.min())
return group
result = df.groupby('d').apply(f)
expected = df.copy()
expected['v2'] = np.tile([0., 0.5, 1], 2)
assert_frame_equal(result, expected)
def test_apply_corner(self):
result = self.tsframe.groupby(lambda x: x.year).apply(lambda x: x * 2)
expected = self.tsframe * 2
assert_frame_equal(result, expected)
def test_apply_without_copy(self):
        # GH 5545
        # returning a non-copy from an applied function used to fail
data = DataFrame({'id_field': [100, 100, 200, 300],
'category': ['a', 'b', 'c', 'c'],
'value': [1, 2, 3, 4]})
def filt1(x):
if x.shape[0] == 1:
return x.copy()
else:
return x[x.category == 'c']
def filt2(x):
if x.shape[0] == 1:
return x
else:
return x[x.category == 'c']
expected = data.groupby('id_field').apply(filt1)
result = data.groupby('id_field').apply(filt2)
assert_frame_equal(result, expected)
def test_apply_use_categorical_name(self):
from pandas import qcut
cats = qcut(self.df.C, 4)
def get_stats(group):
return {'min': group.min(),
'max': group.max(),
'count': group.count(),
'mean': group.mean()}
result = self.df.groupby(cats).D.apply(get_stats)
self.assertEqual(result.index.names[0], 'C')
def test_apply_categorical_data(self):
# GH 10138
for ordered in [True, False]:
dense = Categorical(list('abc'), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(
list('aaa'), categories=['a', 'b'], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({'missing': missing,
'dense': dense,
'values': values})
grouped = df.groupby(['missing', 'dense'])
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_product([['a', 'b'], ['a', 'b', 'c']],
names=['missing', 'dense'])
expected = DataFrame([0, 1, 2, np.nan, np.nan, np.nan],
index=idx,
columns=['values'])
assert_frame_equal(grouped.apply(lambda x: np.mean(x)), expected)
assert_frame_equal(grouped.mean(), expected)
assert_frame_equal(grouped.agg(np.mean), expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_product([['a'], ['a', 'b', 'c']],
names=['missing', 'dense'])
expected = Series(1, index=idx)
assert_series_equal(grouped.apply(lambda x: 1), expected)
def test_apply_corner_cases(self):
# #535, can't use sliding iterator
N = 1000
labels = np.random.randint(0, 100, size=N)
df = DataFrame({'key': labels,
'value1': np.random.randn(N),
'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})
grouped = df.groupby('key')
def f(g):
g['value3'] = g['value1'] * 2
return g
result = grouped.apply(f)
self.assertTrue('value3' in result)
def test_transform_mixed_type(self):
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]
])
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)}, index=index)
def f(group):
group['g'] = group['d'] * 2
return group[:1]
grouped = df.groupby('c')
result = grouped.apply(f)
self.assertEqual(result['d'].dtype, np.float64)
# this is by definition a mutating operation!
with option_context('mode.chained_assignment', None):
for key, group in grouped:
res = f(group)
assert_frame_equal(res, result.ix[key])
def test_groupby_wrong_multi_labels(self):
from pandas import read_csv
data = """index,foo,bar,baz,spam,data
0,foo1,bar1,baz1,spam2,20
1,foo1,bar2,baz1,spam3,30
2,foo2,bar2,baz1,spam2,40
3,foo1,bar1,baz2,spam1,50
4,foo3,bar1,baz2,spam1,60"""
data = read_csv(StringIO(data), index_col=0)
grouped = data.groupby(['foo', 'bar', 'baz', 'spam'])
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
def test_groupby_series_with_name(self):
result = self.df.groupby(self.df['A']).mean()
result2 = self.df.groupby(self.df['A'], as_index=False).mean()
self.assertEqual(result.index.name, 'A')
self.assertIn('A', result2)
result = self.df.groupby([self.df['A'], self.df['B']]).mean()
result2 = self.df.groupby([self.df['A'], self.df['B']],
as_index=False).mean()
self.assertEqual(result.index.names, ('A', 'B'))
self.assertIn('A', result2)
self.assertIn('B', result2)
def test_seriesgroupby_name_attr(self):
# GH 6265
result = self.df.groupby('A')['C']
self.assertEqual(result.count().name, 'C')
self.assertEqual(result.mean().name, 'C')
testFunc = lambda x: np.sum(x) * 2
self.assertEqual(result.agg(testFunc).name, 'C')
def test_consistency_name(self):
# GH 12363
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
expected = df.groupby(['A']).B.count()
result = df.B.groupby(df.A).count()
assert_series_equal(result, expected)
def test_groupby_name_propagation(self):
# GH 6124
def summarize(df, name=None):
return Series({'count': 1, 'mean': 2, 'omissions': 3, }, name=name)
def summarize_random_name(df):
# Provide a different name for each Series. In this case, groupby
# should not attempt to propagate the Series name since they are
# inconsistent.
return Series({
'count': 1,
'mean': 2,
'omissions': 3,
}, name=df.iloc[0]['A'])
metrics = self.df.groupby('A').apply(summarize)
self.assertEqual(metrics.columns.name, None)
metrics = self.df.groupby('A').apply(summarize, 'metrics')
self.assertEqual(metrics.columns.name, 'metrics')
metrics = self.df.groupby('A').apply(summarize_random_name)
self.assertEqual(metrics.columns.name, None)
def test_groupby_nonstring_columns(self):
df = DataFrame([np.arange(10) for x in range(10)])
grouped = df.groupby(0)
result = grouped.mean()
expected = df.groupby(df[0]).mean()
assert_frame_equal(result, expected)
def test_cython_grouper_series_bug_noncontig(self):
arr = np.empty((100, 100))
arr.fill(np.nan)
obj = Series(arr[:, 0], index=lrange(100))
inds = np.tile(lrange(10), 10)
result = obj.groupby(inds).agg(Series.median)
self.assertTrue(result.isnull().all())
def test_series_grouper_noncontig_index(self):
index = Index(tm.rands_array(10, 100))
values = Series(np.random.randn(50), index=index[::2])
labels = np.random.randint(0, 5, 50)
# it works!
grouped = values.groupby(labels)
        # accessing the index elements used to cause a segfault
f = lambda x: len(set(map(id, x.index)))
grouped.agg(f)
def test_convert_objects_leave_decimal_alone(self):
from decimal import Decimal
s = Series(lrange(5))
labels = np.array(['a', 'b', 'c', 'd', 'e'], dtype='O')
def convert_fast(x):
return Decimal(str(x.mean()))
def convert_force_pure(x):
            # the passed values should be a view (non-empty base),
            # forcing the pure-python path
assert (len(x.base) > 0)
return Decimal(str(x.mean()))
grouped = s.groupby(labels)
result = grouped.agg(convert_fast)
self.assertEqual(result.dtype, np.object_)
tm.assertIsInstance(result[0], Decimal)
result = grouped.agg(convert_force_pure)
self.assertEqual(result.dtype, np.object_)
tm.assertIsInstance(result[0], Decimal)
def test_fast_apply(self):
        # make sure that fast apply is correctly called
        # rather than raising any kind of error;
        # otherwise the python path will be called,
        # which slows things down
N = 1000
labels = np.random.randint(0, 2000, size=N)
labels2 = np.random.randint(0, 3, size=N)
df = DataFrame({'key': labels,
'key2': labels2,
'value1': np.random.randn(N),
'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})
def f(g):
return 1
g = df.groupby(['key', 'key2'])
grouper = g.grouper
splitter = grouper._get_splitter(g._selected_obj, axis=g.axis)
group_keys = grouper._get_group_keys()
values, mutated = splitter.fast_apply(f, group_keys)
self.assertFalse(mutated)
def test_apply_with_mixed_dtype(self):
# GH3480, apply with mixed dtype on axis=1 breaks in 0.11
df = DataFrame({'foo1': ['one', 'two', 'two', 'three', 'one', 'two'],
'foo2': np.random.randn(6)})
result = df.apply(lambda x: x, axis=1)
assert_series_equal(df.get_dtype_counts(), result.get_dtype_counts())
# GH 3610 incorrect dtype conversion with as_index=False
df = DataFrame({"c1": [1, 2, 6, 6, 8]})
df["c2"] = df.c1 / 2.0
result1 = df.groupby("c2").mean().reset_index().c2
result2 = df.groupby("c2", as_index=False).mean().c2
assert_series_equal(result1, result2)
def test_groupby_aggregation_mixed_dtype(self):
# GH 6212
expected = DataFrame({
'v1': [5, 5, 7, np.nan, 3, 3, 4, 1],
'v2': [55, 55, 77, np.nan, 33, 33, 44, 11]},
index=MultiIndex.from_tuples([(1, 95), (1, 99), (2, 95), (2, 99),
('big', 'damp'),
('blue', 'dry'),
('red', 'red'), ('red', 'wet')],
names=['by1', 'by2']))
df = DataFrame({
'v1': [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9],
'v2': [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99],
'by1': ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan,
12],
'by2': ["wet", "dry", 99, 95, np.nan, "damp", 95, 99, "red", 99,
np.nan, np.nan]
})
g = df.groupby(['by1', 'by2'])
result = g[['v1', 'v2']].mean()
assert_frame_equal(result, expected)
def test_groupby_dtype_inference_empty(self):
# GH 6733
df = DataFrame({'x': [], 'range': np.arange(0, dtype='int64')})
self.assertEqual(df['x'].dtype, np.float64)
result = df.groupby('x').first()
exp_index = Index([], name='x', dtype=np.float64)
expected = DataFrame({'range': Series(
[], index=exp_index, dtype='int64')})
assert_frame_equal(result, expected, by_blocks=True)
def test_groupby_list_infer_array_like(self):
result = self.df.groupby(list(self.df['A'])).mean()
expected = self.df.groupby(self.df['A']).mean()
assert_frame_equal(result, expected, check_names=False)
self.assertRaises(Exception, self.df.groupby, list(self.df['A'][:-1]))
# pathological case of ambiguity
df = DataFrame({'foo': [0, 1],
'bar': [3, 4],
'val': np.random.randn(2)})
        result = df.groupby(['foo', 'bar']).mean()
        expected = df.groupby([df['foo'], df['bar']]).mean()[['val']]
        assert_frame_equal(result, expected)
def test_groupby_keys_same_size_as_index(self):
# GH 11185
freq = 's'
index = pd.date_range(start=pd.Timestamp('2015-09-29T11:34:44-0700'),
periods=2, freq=freq)
df = pd.DataFrame([['A', 10], ['B', 15]], columns=[
'metric', 'values'
], index=index)
result = df.groupby([pd.Grouper(level=0, freq=freq), 'metric']).mean()
expected = df.set_index([df.index, 'metric'])
assert_frame_equal(result, expected)
def test_groupby_one_row(self):
# GH 11741
df1 = pd.DataFrame(np.random.randn(1, 4), columns=list('ABCD'))
self.assertRaises(KeyError, df1.groupby, 'Z')
df2 = pd.DataFrame(np.random.randn(2, 4), columns=list('ABCD'))
self.assertRaises(KeyError, df2.groupby, 'Z')
def test_groupby_nat_exclude(self):
# GH 6992
df = pd.DataFrame(
{'values': np.random.randn(8),
'dt': [np.nan, pd.Timestamp('2013-01-01'), np.nan, pd.Timestamp(
'2013-02-01'), np.nan, pd.Timestamp('2013-02-01'), np.nan,
pd.Timestamp('2013-01-01')],
'str': [np.nan, 'a', np.nan, 'a', np.nan, 'a', np.nan, 'b']})
grouped = df.groupby('dt')
expected = [[1, 7], [3, 5]]
keys = sorted(grouped.groups.keys())
self.assertEqual(len(keys), 2)
for k, e in zip(keys, expected):
            # grouped.groups keys are np.datetime64 with system tz;
            # compare only the values so the tz does not matter
self.assertEqual(grouped.groups[k], e)
# confirm obj is not filtered
tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df)
self.assertEqual(grouped.ngroups, 2)
expected = {Timestamp('2013-01-01 00:00:00'): np.array([1, 7]),
Timestamp('2013-02-01 00:00:00'): np.array([3, 5])}
for k in grouped.indices:
self.assert_numpy_array_equal(grouped.indices[k], expected[k])
tm.assert_frame_equal(
grouped.get_group(Timestamp('2013-01-01')), df.iloc[[1, 7]])
tm.assert_frame_equal(
grouped.get_group(Timestamp('2013-02-01')), df.iloc[[3, 5]])
self.assertRaises(KeyError, grouped.get_group, pd.NaT)
nan_df = DataFrame({'nan': [np.nan, np.nan, np.nan],
'nat': [pd.NaT, pd.NaT, pd.NaT]})
self.assertEqual(nan_df['nan'].dtype, 'float64')
self.assertEqual(nan_df['nat'].dtype, 'datetime64[ns]')
for key in ['nan', 'nat']:
grouped = nan_df.groupby(key)
self.assertEqual(grouped.groups, {})
self.assertEqual(grouped.ngroups, 0)
self.assertEqual(grouped.indices, {})
self.assertRaises(KeyError, grouped.get_group, np.nan)
self.assertRaises(KeyError, grouped.get_group, pd.NaT)
def test_dictify(self):
dict(iter(self.df.groupby('A')))
dict(iter(self.df.groupby(['A', 'B'])))
dict(iter(self.df['C'].groupby(self.df['A'])))
dict(iter(self.df['C'].groupby([self.df['A'], self.df['B']])))
dict(iter(self.df.groupby('A')['C']))
dict(iter(self.df.groupby(['A', 'B'])['C']))
def test_sparse_friendly(self):
sdf = self.df[['C', 'D']].to_sparse()
panel = tm.makePanel()
tm.add_nans(panel)
def _check_work(gp):
gp.mean()
gp.agg(np.mean)
dict(iter(gp))
# it works!
_check_work(sdf.groupby(lambda x: x // 2))
_check_work(sdf['C'].groupby(lambda x: x // 2))
_check_work(sdf.groupby(self.df['A']))
# do this someday
# _check_work(panel.groupby(lambda x: x.month, axis=1))
def test_panel_groupby(self):
self.panel = tm.makePanel()
tm.add_nans(self.panel)
grouped = self.panel.groupby({'ItemA': 0, 'ItemB': 0, 'ItemC': 1},
axis='items')
agged = grouped.mean()
agged2 = grouped.agg(lambda x: x.mean('items'))
tm.assert_panel_equal(agged, agged2)
self.assert_numpy_array_equal(agged.items, [0, 1])
grouped = self.panel.groupby(lambda x: x.month, axis='major')
agged = grouped.mean()
self.assert_numpy_array_equal(agged.major_axis, sorted(list(set(
self.panel.major_axis.month))))
grouped = self.panel.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},
axis='minor')
agged = grouped.mean()
self.assert_numpy_array_equal(agged.minor_axis, [0, 1])
def test_numpy_groupby(self):
from pandas.core.groupby import numpy_groupby
data = np.random.randn(100, 100)
labels = np.random.randint(0, 10, size=100)
df = DataFrame(data)
result = df.groupby(labels).sum().values
expected = numpy_groupby(data, labels)
assert_almost_equal(result, expected)
result = df.groupby(labels, axis=1).sum().values
expected = numpy_groupby(data, labels, axis=1)
assert_almost_equal(result, expected)
def test_groupby_2d_malformed(self):
d = DataFrame(index=lrange(2))
d['group'] = ['g1', 'g2']
d['zeros'] = [0, 0]
d['ones'] = [1, 1]
d['label'] = ['l1', 'l2']
tmp = d.groupby(['group']).mean()
res_values = np.array([[0., 1.], [0., 1.]])
self.assert_numpy_array_equal(tmp.columns, ['zeros', 'ones'])
self.assert_numpy_array_equal(tmp.values, res_values)
def test_int32_overflow(self):
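        # the result should have the same length regardless of key order
        # (guards against int32 overflow when combining group labels)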
B = np.concatenate((np.arange(10000), np.arange(10000), np.arange(5000)
))
A = np.arange(25000)
df = DataFrame({'A': A,
'B': B,
'C': A,
'D': B,
'E': np.random.randn(25000)})
left = df.groupby(['A', 'B', 'C', 'D']).sum()
right = df.groupby(['D', 'C', 'B', 'A']).sum()
self.assertEqual(len(left), len(right))
def test_int64_overflow(self):
from pandas.core.groupby import _int64_overflow_possible
B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500)))
A = np.arange(2500)
df = DataFrame({'A': A,
'B': B,
'C': A,
'D': B,
'E': A,
'F': B,
'G': A,
'H': B,
'values': np.random.randn(2500)})
lg = df.groupby(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
rg = df.groupby(['H', 'G', 'F', 'E', 'D', 'C', 'B', 'A'])
left = lg.sum()['values']
right = rg.sum()['values']
exp_index, _ = left.index.sortlevel(0)
self.assertTrue(left.index.equals(exp_index))
exp_index, _ = right.index.sortlevel(0)
self.assertTrue(right.index.equals(exp_index))
tups = list(map(tuple, df[['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'
]].values))
tups = com._asarray_tuplesafe(tups)
expected = df.groupby(tups).sum()['values']
for k, v in compat.iteritems(expected):
self.assertEqual(left[k], right[k[::-1]])
self.assertEqual(left[k], v)
self.assertEqual(len(left), len(right))
# GH9096
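        # 55109 unique rows grouped on all four (identical) columns should
        # yield one group per row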
values = range(55109)
data = pd.DataFrame.from_dict({'a': values,
'b': values,
'c': values,
'd': values})
grouped = data.groupby(['a', 'b', 'c', 'd'])
self.assertEqual(len(grouped), len(values))
arr = np.random.randint(-1 << 12, 1 << 12, (1 << 15, 5))
i = np.random.choice(len(arr), len(arr) * 4)
        arr = np.vstack((arr, arr[i]))  # add some duplicate rows
i = np.random.permutation(len(arr))
arr = arr[i] # shuffle rows
df = DataFrame(arr, columns=list('abcde'))
df['jim'], df['joe'] = np.random.randn(2, len(df)) * 10
gr = df.groupby(list('abcde'))
# verify this is testing what it is supposed to test!
self.assertTrue(_int64_overflow_possible(gr.grouper.shape))
        # manually compute groupings
jim, joe = defaultdict(list), defaultdict(list)
for key, a, b in zip(map(tuple, arr), df['jim'], df['joe']):
jim[key].append(a)
joe[key].append(b)
self.assertEqual(len(gr), len(jim))
mi = MultiIndex.from_tuples(jim.keys(), names=list('abcde'))
def aggr(func):
f = lambda a: np.fromiter(map(func, a), dtype='f8')
arr = np.vstack((f(jim.values()), f(joe.values()))).T
res = DataFrame(arr, columns=['jim', 'joe'], index=mi)
return res.sort_index()
assert_frame_equal(gr.mean(), aggr(np.mean))
assert_frame_equal(gr.median(), aggr(np.median))
def test_groupby_sort_multi(self):
df = DataFrame({'a': ['foo', 'bar', 'baz'],
'b': [3, 2, 1],
'c': [0, 1, 2],
'd': np.random.randn(3)})
tups = lmap(tuple, df[['a', 'b', 'c']].values)
tups = com._asarray_tuplesafe(tups)
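        # keys sorted lexicographically on ('a', 'b', 'c') come out as
        # bar, baz, foo, i.e. original rows 1, 2, 0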
result = df.groupby(['a', 'b', 'c'], sort=True).sum()
self.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]])
tups = lmap(tuple, df[['c', 'a', 'b']].values)
tups = com._asarray_tuplesafe(tups)
result = df.groupby(['c', 'a', 'b'], sort=True).sum()
self.assert_numpy_array_equal(result.index.values, tups)
tups = lmap(tuple, df[['b', 'c', 'a']].values)
tups = com._asarray_tuplesafe(tups)
result = df.groupby(['b', 'c', 'a'], sort=True).sum()
self.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]])
df = DataFrame({'a': [0, 1, 2, 0, 1, 2],
'b': [0, 0, 0, 1, 1, 1],
'd': np.random.randn(6)})
grouped = df.groupby(['a', 'b'])['d']
result = grouped.sum()
_check_groupby(df, result, ['a', 'b'], 'd')
def test_intercept_builtin_sum(self):
s = Series([1., 2., np.nan, 3.])
grouped = s.groupby([0, 1, 2, 2])
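        # builtins.sum should be intercepted and give the same NaN-skipping
        # result as the groupby sum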
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_column_select_via_attr(self):
result = self.df.groupby('A').C.sum()
expected = self.df.groupby('A')['C'].sum()
assert_series_equal(result, expected)
self.df['mean'] = 1.5
result = self.df.groupby('A').mean()
expected = self.df.groupby('A').agg(np.mean)
assert_frame_equal(result, expected)
def test_rank_apply(self):
lev1 = tm.rands_array(10, 100)
lev2 = tm.rands_array(10, 130)
lab1 = np.random.randint(0, 100, size=500)
lab2 = np.random.randint(0, 130, size=500)
df = DataFrame({'value': np.random.randn(500),
'key1': lev1.take(lab1),
'key2': lev2.take(lab2)})
result = df.groupby(['key1', 'key2']).value.rank()
expected = []
for key, piece in df.groupby(['key1', 'key2']):
expected.append(piece.value.rank())
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
assert_series_equal(result, expected)
result = df.groupby(['key1', 'key2']).value.rank(pct=True)
expected = []
for key, piece in df.groupby(['key1', 'key2']):
expected.append(piece.value.rank(pct=True))
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
assert_series_equal(result, expected)
def test_dont_clobber_name_column(self):
df = DataFrame({'key': ['a', 'a', 'a', 'b', 'b', 'b'],
'name': ['foo', 'bar', 'baz'] * 2})
result = df.groupby('key').apply(lambda x: x)
assert_frame_equal(result, df)
def test_skip_group_keys(self):
from pandas import concat
tsf = tm.makeTimeDataFrame()
grouped = tsf.groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.sort_values(by='A')[:3])
pieces = []
for key, group in grouped:
pieces.append(group.sort_values(by='A')[:3])
expected = concat(pieces)
assert_frame_equal(result, expected)
grouped = tsf['A'].groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.sort_values()[:3])
pieces = []
for key, group in grouped:
pieces.append(group.sort_values()[:3])
expected = concat(pieces)
assert_series_equal(result, expected)
def test_no_nonsense_name(self):
# GH #995
s = self.frame['C'].copy()
s.name = None
result = s.groupby(self.frame['A']).agg(np.sum)
self.assertIsNone(result.name)
def test_wrap_agg_out(self):
grouped = self.three_group.groupby(['A', 'B'])
def func(ser):
if ser.dtype == np.object:
raise TypeError
else:
return ser.sum()
result = grouped.aggregate(func)
exp_grouped = self.three_group.ix[:, self.three_group.columns != 'C']
expected = exp_grouped.groupby(['A', 'B']).aggregate(func)
assert_frame_equal(result, expected)
def test_multifunc_sum_bug(self):
# GH #1065
x = DataFrame(np.arange(9).reshape(3, 3))
x['test'] = 0
x['fl'] = [1.3, 1.5, 1.6]
grouped = x.groupby('test')
result = grouped.agg({'fl': 'sum', 2: 'size'})
self.assertEqual(result['fl'].dtype, np.float64)
def test_handle_dict_return_value(self):
def f(group):
return {'min': group.min(), 'max': group.max()}
def g(group):
return Series({'min': group.min(), 'max': group.max()})
result = self.df.groupby('A')['C'].apply(f)
expected = self.df.groupby('A')['C'].apply(g)
tm.assertIsInstance(result, Series)
assert_series_equal(result, expected)
def test_getitem_list_of_columns(self):
df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8),
'E': np.random.randn(8)})
result = df.groupby('A')[['C', 'D']].mean()
result2 = df.groupby('A')['C', 'D'].mean()
result3 = df.groupby('A')[df.columns[2:4]].mean()
expected = df.ix[:, ['A', 'C', 'D']].groupby('A').mean()
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected)
def test_agg_multiple_functions_maintain_order(self):
# GH #610
funcs = [('mean', np.mean), ('max', np.max), ('min', np.min)]
result = self.df.groupby('A')['C'].agg(funcs)
exp_cols = ['mean', 'max', 'min']
self.assert_numpy_array_equal(result.columns, exp_cols)
def test_multiple_functions_tuples_and_non_tuples(self):
# #1359
funcs = [('foo', 'mean'), 'std']
ex_funcs = [('foo', 'mean'), ('std', 'std')]
result = self.df.groupby('A')['C'].agg(funcs)
expected = self.df.groupby('A')['C'].agg(ex_funcs)
assert_frame_equal(result, expected)
result = self.df.groupby('A').agg(funcs)
expected = self.df.groupby('A').agg(ex_funcs)
assert_frame_equal(result, expected)
def test_agg_multiple_functions_too_many_lambdas(self):
grouped = self.df.groupby('A')
funcs = ['mean', lambda x: x.mean(), lambda x: x.std()]
self.assertRaises(SpecificationError, grouped.agg, funcs)
def test_more_flexible_frame_multi_function(self):
from pandas import concat
grouped = self.df.groupby('A')
exmean = grouped.agg(OrderedDict([['C', np.mean], ['D', np.mean]]))
exstd = grouped.agg(OrderedDict([['C', np.std], ['D', np.std]]))
expected = concat([exmean, exstd], keys=['mean', 'std'], axis=1)
expected = expected.swaplevel(0, 1, axis=1).sortlevel(0, axis=1)
d = OrderedDict([['C', [np.mean, np.std]], ['D', [np.mean, np.std]]])
result = grouped.aggregate(d)
assert_frame_equal(result, expected)
        # be careful: a single function mixed with a list of functions
        # per column
result = grouped.aggregate(OrderedDict([['C', np.mean],
['D', [np.mean, np.std]]]))
expected = grouped.aggregate(OrderedDict([['C', np.mean],
['D', [np.mean, np.std]]]))
assert_frame_equal(result, expected)
def foo(x):
return np.mean(x)
def bar(x):
return np.std(x, ddof=1)
d = OrderedDict([['C', np.mean], ['D', OrderedDict(
[['foo', np.mean], ['bar', np.std]])]])
result = grouped.aggregate(d)
d = OrderedDict([['C', [np.mean]], ['D', [foo, bar]]])
expected = grouped.aggregate(d)
assert_frame_equal(result, expected)
def test_multi_function_flexible_mix(self):
# GH #1268
grouped = self.df.groupby('A')
d = OrderedDict([['C', OrderedDict([['foo', 'mean'], [
'bar', 'std'
]])], ['D', 'sum']])
result = grouped.aggregate(d)
d2 = OrderedDict([['C', OrderedDict([['foo', 'mean'], [
'bar', 'std'
]])], ['D', ['sum']]])
result2 = grouped.aggregate(d2)
d3 = OrderedDict([['C', OrderedDict([['foo', 'mean'], [
'bar', 'std'
]])], ['D', {'sum': 'sum'}]])
expected = grouped.aggregate(d3)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_agg_callables(self):
# GH 7929
df = DataFrame({'foo': [1, 2], 'bar': [3, 4]}).astype(np.int64)
class fn_class(object):
def __call__(self, x):
return sum(x)
equiv_callables = [sum, np.sum, lambda x: sum(x), lambda x: x.sum(),
partial(sum), fn_class()]
expected = df.groupby("foo").agg(sum)
for ecall in equiv_callables:
result = df.groupby('foo').agg(ecall)
assert_frame_equal(result, expected)
def test_set_group_name(self):
def f(group):
assert group.name is not None
return group
def freduce(group):
assert group.name is not None
return group.sum()
def foo(x):
return freduce(x)
def _check_all(grouped):
# make sure all these work
grouped.apply(f)
grouped.aggregate(freduce)
grouped.aggregate({'C': freduce, 'D': freduce})
grouped.transform(f)
grouped['C'].apply(f)
grouped['C'].aggregate(freduce)
grouped['C'].aggregate([freduce, foo])
grouped['C'].transform(f)
_check_all(self.df.groupby('A'))
_check_all(self.df.groupby(['A', 'B']))
def test_no_dummy_key_names(self):
# GH #1291
result = self.df.groupby(self.df['A'].values).sum()
self.assertIsNone(result.index.name)
result = self.df.groupby([self.df['A'].values, self.df['B'].values
]).sum()
self.assertEqual(result.index.names, (None, None))
def test_groupby_sort_categorical(self):
# dataframe groupby sort was being ignored # GH 8868
df = DataFrame([['(7.5, 10]', 10, 10],
['(7.5, 10]', 8, 20],
['(2.5, 5]', 5, 30],
['(5, 7.5]', 6, 40],
['(2.5, 5]', 4, 50],
['(0, 2.5]', 1, 60],
['(5, 7.5]', 7, 70]], columns=['range', 'foo', 'bar'])
df['range'] = Categorical(df['range'], ordered=True)
index = CategoricalIndex(
['(0, 2.5]', '(2.5, 5]', '(5, 7.5]', '(7.5, 10]'], name='range')
result_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]],
columns=['foo', 'bar'], index=index)
col = 'range'
assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
# when categories is ordered, group is ordered by category's order
assert_frame_equal(result_sort, df.groupby(col, sort=False).first())
df['range'] = Categorical(df['range'], ordered=False)
index = CategoricalIndex(
['(0, 2.5]', '(2.5, 5]', '(5, 7.5]', '(7.5, 10]'], name='range')
result_sort = DataFrame([[1, 60], [5, 30], [6, 40], [10, 10]],
columns=['foo', 'bar'], index=index)
index = CategoricalIndex(['(7.5, 10]', '(2.5, 5]',
'(5, 7.5]', '(0, 2.5]'],
name='range')
result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
index=index, columns=['foo', 'bar'])
col = 'range'
        # this is an unordered categorical, but we allow this
assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
assert_frame_equal(result_nosort, df.groupby(col, sort=False).first())
def test_groupby_sort_categorical_datetimelike(self):
# GH10505
        # use the same data as test_groupby_sort_categorical, where each
        # category corresponds to datetime.month
df = DataFrame({'dt': [datetime(2011, 7, 1), datetime(2011, 7, 1),
datetime(2011, 2, 1), datetime(2011, 5, 1),
datetime(2011, 2, 1), datetime(2011, 1, 1),
datetime(2011, 5, 1)],
'foo': [10, 8, 5, 6, 4, 1, 7],
'bar': [10, 20, 30, 40, 50, 60, 70]},
columns=['dt', 'foo', 'bar'])
# ordered=True
df['dt'] = Categorical(df['dt'], ordered=True)
index = [datetime(2011, 1, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 7, 1)]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])
result_sort.index = CategoricalIndex(index, name='dt', ordered=True)
index = [datetime(2011, 7, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 1, 1)]
result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
columns=['foo', 'bar'])
result_nosort.index = CategoricalIndex(index, categories=index,
name='dt', ordered=True)
col = 'dt'
assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
# when categories is ordered, group is ordered by category's order
assert_frame_equal(result_sort, df.groupby(col, sort=False).first())
# ordered = False
df['dt'] = Categorical(df['dt'], ordered=False)
index = [datetime(2011, 1, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 7, 1)]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])
result_sort.index = CategoricalIndex(index, name='dt')
index = [datetime(2011, 7, 1), datetime(2011, 2, 1),
datetime(2011, 5, 1), datetime(2011, 1, 1)]
result_nosort = DataFrame([[10, 10], [5, 30], [6, 40], [1, 60]],
columns=['foo', 'bar'])
result_nosort.index = CategoricalIndex(index, categories=index,
name='dt')
col = 'dt'
assert_frame_equal(result_sort, df.groupby(col, sort=True).first())
assert_frame_equal(result_nosort, df.groupby(col, sort=False).first())
def test_groupby_sort_multiindex_series(self):
        # the sort argument of a Series MultiIndex groupby was not being
        # passed through to _compress_group_index
        # GH 9444
index = MultiIndex(levels=[[1, 2], [1, 2]],
labels=[[0, 0, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0]],
names=['a', 'b'])
mseries = Series([0, 1, 2, 3, 4, 5], index=index)
index = MultiIndex(levels=[[1, 2], [1, 2]],
labels=[[0, 0, 1], [1, 0, 0]], names=['a', 'b'])
mseries_result = Series([0, 2, 4], index=index)
result = mseries.groupby(level=['a', 'b'], sort=False).first()
assert_series_equal(result, mseries_result)
result = mseries.groupby(level=['a', 'b'], sort=True).first()
assert_series_equal(result, mseries_result.sort_index())
def test_groupby_categorical(self):
levels = ['foo', 'bar', 'baz', 'qux']
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats).mean()
expected = data.groupby(np.asarray(cats)).mean()
exp_idx = CategoricalIndex(levels, ordered=True)
expected = expected.reindex(exp_idx)
assert_frame_equal(result, expected)
grouped = data.groupby(cats)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(
Categorical(ord_labels), sort=False).describe()
expected.index.names = [None, None]
assert_frame_equal(desc_result, expected)
# GH 10460
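        # describe() emits 8 statistics per group, so the outer index level
        # repeats each of the 4 categories 8 times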
expc = Categorical.from_codes(
np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
self.assert_index_equal(desc_result.index.get_level_values(0), exp)
exp = Index(['count', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max'] * 4)
self.assert_index_equal(desc_result.index.get_level_values(1), exp)
def test_groupby_datetime_categorical(self):
# GH9049: ensure backward compatibility
levels = pd.date_range('2014-01-01', periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats).mean()
expected = data.groupby(np.asarray(cats)).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(expected.index,
categories=expected.index,
ordered=True)
assert_frame_equal(result, expected)
grouped = data.groupby(cats)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take_nd(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels).describe()
expected.index.names = [None, None]
assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0),
expected.index.get_level_values(0))
# GH 10460
expc = Categorical.from_codes(
np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
self.assert_index_equal(desc_result.index.get_level_values(0), exp)
exp = Index(['count', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max'] * 4)
self.assert_index_equal(desc_result.index.get_level_values(1), exp)
def test_groupby_categorical_index(self):
levels = ['foo', 'bar', 'baz', 'qux']
codes = np.random.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(
np.repeat(
np.arange(20), 4).reshape(-1, 4), columns=list('abcd'))
df['cats'] = cats
# with a cat index
result = df.set_index('cats').groupby(level=0).sum()
expected = df[list('abcd')].groupby(cats.codes).sum()
expected.index = CategoricalIndex(
Categorical.from_codes(
[0, 1, 2, 3], levels, ordered=True), name='cats')
assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby('cats').sum()
expected = df[list('abcd')].groupby(cats.codes).sum()
expected.index = CategoricalIndex(
Categorical.from_codes(
[0, 1, 2, 3], levels, ordered=True), name='cats')
assert_frame_equal(result, expected)
def test_groupby_describe_categorical_columns(self):
# GH 11558
cats = pd.CategoricalIndex(['qux', 'foo', 'baz', 'bar'],
categories=['foo', 'bar', 'baz', 'qux'],
ordered=True)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.columns, cats)
tm.assert_categorical_equal(result.columns.values, cats.values)
def test_groupby_unstack_categorical(self):
# GH11558 (example is taken from the original issue)
df = pd.DataFrame({'a': range(10),
'medium': ['A', 'B'] * 5,
'artist': list('XYXXY') * 2})
df['medium'] = df['medium'].astype('category')
gcat = df.groupby(['artist', 'medium'])['a'].count().unstack()
result = gcat.describe()
exp_columns = pd.CategoricalIndex(['A', 'B'], ordered=False,
name='medium')
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat['A'] + gcat['B']
expected = pd.Series([6, 4], index=pd.Index(['X', 'Y'], name='artist'))
tm.assert_series_equal(result, expected)
def test_groupby_groups_datetimeindex(self):
# #1430
from pandas.tseries.api import DatetimeIndex
periods = 1000
ind = DatetimeIndex(start='2012/1/1', freq='5min', periods=periods)
df = DataFrame({'high': np.arange(periods),
'low': np.arange(periods)}, index=ind)
grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))
# it works!
groups = grouped.groups
tm.assertIsInstance(list(groups.keys())[0], datetime)
def test_groupby_groups_datetimeindex_tz(self):
# GH 3950
dates = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'datetime': dates,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['datetime'] = df['datetime'].apply(
lambda d: Timestamp(d, tz='US/Pacific'))
exp_idx1 = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 09:00:00'],
tz='US/Pacific', name='datetime')
exp_idx2 = Index(['a', 'b'] * 3, name='label')
exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
expected = DataFrame({'value1': [0, 3, 1, 4, 2, 5],
'value2': [1, 2, 2, 1, 1, 2]},
index=exp_idx, columns=['value1', 'value2'])
result = df.groupby(['datetime', 'label']).sum()
assert_frame_equal(result, expected)
# by level
didx = pd.DatetimeIndex(dates, tz='Asia/Tokyo')
df = DataFrame({'value1': np.arange(6, dtype='int64'),
'value2': [1, 2, 3, 1, 2, 3]},
index=didx)
exp_idx = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'], tz='Asia/Tokyo')
expected = DataFrame({'value1': [3, 5, 7], 'value2': [2, 4, 6]},
index=exp_idx, columns=['value1', 'value2'])
result = df.groupby(level=0).sum()
assert_frame_equal(result, expected)
def test_groupby_multi_timezone(self):
# combining multiple / different timezones yields UTC
data = """0,2000-01-28 16:47:00,America/Chicago
1,2000-01-29 16:48:00,America/Chicago
2,2000-01-30 16:49:00,America/Los_Angeles
3,2000-01-31 16:50:00,America/Chicago
4,2000-01-01 16:50:00,America/New_York"""
df = pd.read_csv(StringIO(data), header=None,
names=['value', 'date', 'tz'])
result = df.groupby('tz').date.apply(
lambda x: pd.to_datetime(x).dt.tz_localize(x.name))
expected = Series([Timestamp('2000-01-28 16:47:00-0600',
tz='America/Chicago'),
Timestamp('2000-01-29 16:48:00-0600',
tz='America/Chicago'),
Timestamp('2000-01-30 16:49:00-0800',
tz='America/Los_Angeles'),
Timestamp('2000-01-31 16:50:00-0600',
tz='America/Chicago'),
Timestamp('2000-01-01 16:50:00-0500',
tz='America/New_York')],
name='date',
dtype=object)
assert_series_equal(result, expected)
tz = 'America/Chicago'
res_values = df.groupby('tz').date.get_group(tz)
result = pd.to_datetime(res_values).dt.tz_localize(tz)
exp_values = Series(['2000-01-28 16:47:00', '2000-01-29 16:48:00',
'2000-01-31 16:50:00'],
index=[0, 1, 3], name='date')
expected = pd.to_datetime(exp_values).dt.tz_localize(tz)
assert_series_equal(result, expected)
def test_groupby_groups_periods(self):
dates = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'period': [pd.Period(d, freq='H') for d in dates],
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
exp_idx1 = pd.PeriodIndex(['2011-07-19 07:00:00',
'2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 09:00:00'],
freq='H', name='period')
exp_idx2 = Index(['a', 'b'] * 3, name='label')
exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
expected = DataFrame({'value1': [0, 3, 1, 4, 2, 5],
'value2': [1, 2, 2, 1, 1, 2]},
index=exp_idx, columns=['value1', 'value2'])
result = df.groupby(['period', 'label']).sum()
assert_frame_equal(result, expected)
# by level
didx = pd.PeriodIndex(dates, freq='H')
df = DataFrame({'value1': np.arange(6, dtype='int64'),
'value2': [1, 2, 3, 1, 2, 3]},
index=didx)
exp_idx = pd.PeriodIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'], freq='H')
expected = DataFrame({'value1': [3, 5, 7], 'value2': [2, 4, 6]},
index=exp_idx, columns=['value1', 'value2'])
result = df.groupby(level=0).sum()
assert_frame_equal(result, expected)
def test_groupby_reindex_inside_function(self):
from pandas.tseries.api import DatetimeIndex
periods = 1000
ind = DatetimeIndex(start='2012/1/1', freq='5min', periods=periods)
df = DataFrame({'high': np.arange(
periods), 'low': np.arange(periods)}, index=ind)
def agg_before(hour, func, fix=False):
"""
Run an aggregate func on the subset of data.
"""
def _func(data):
d = data.select(lambda x: x.hour < 11).dropna()
if fix:
data[data.index[0]]
if len(d) == 0:
return None
return func(d)
return _func
def afunc(data):
d = data.select(lambda x: x.hour < 11).dropna()
return np.max(d)
grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))
closure_bad = grouped.agg({'high': agg_before(11, np.max)})
closure_good = grouped.agg({'high': agg_before(11, np.max, True)})
assert_frame_equal(closure_bad, closure_good)
def test_multiindex_columns_empty_level(self):
l = [['count', 'values'], ['to filter', '']]
midx = MultiIndex.from_tuples(l)
df = DataFrame([[long(1), 'A']], columns=midx)
grouped = df.groupby('to filter').groups
self.assert_numpy_array_equal(grouped['A'], [0])
grouped = df.groupby([('to filter', '')]).groups
self.assert_numpy_array_equal(grouped['A'], [0])
df = DataFrame([[long(1), 'A'], [long(2), 'B']], columns=midx)
expected = df.groupby('to filter').groups
result = df.groupby([('to filter', '')]).groups
self.assertEqual(result, expected)
df = DataFrame([[long(1), 'A'], [long(2), 'A']], columns=midx)
expected = df.groupby('to filter').groups
result = df.groupby([('to filter', '')]).groups
self.assertEqual(result, expected)
def test_cython_median(self):
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
assert_frame_equal(rs, xp)
def test_groupby_categorical_no_compress(self):
data = Series(np.random.randn(9))
codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
result = data.groupby(cats).mean()
exp = data.groupby(codes).mean()
exp.index = CategoricalIndex(exp.index, categories=cats.categories,
ordered=cats.ordered)
assert_series_equal(result, exp)
codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
result = data.groupby(cats).mean()
exp = data.groupby(codes).mean().reindex(cats.categories)
exp.index = CategoricalIndex(exp.index, categories=cats.categories,
ordered=cats.ordered)
assert_series_equal(result, exp)
cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
result = data.groupby("b").mean()
result = result["a"].values
exp = np.array([1, 2, 4, np.nan])
self.assert_numpy_array_equal(result, exp)
def test_groupby_non_arithmetic_agg_types(self):
# GH9311, GH6620
df = pd.DataFrame([{'a': 1,
'b': 1}, {'a': 1,
'b': 2}, {'a': 2,
'b': 3}, {'a': 2,
'b': 4}])
dtypes = ['int8', 'int16', 'int32', 'int64', 'float32', 'float64']
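        # expected frame per method; optional 'args' are positional arguments
        # and 'out_type' overrides the expected dtype of column 'b'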
grp_exp = {'first': {'df': [{'a': 1,
'b': 1}, {'a': 2,
'b': 3}]},
'last': {'df': [{'a': 1,
'b': 2}, {'a': 2,
'b': 4}]},
'min': {'df': [{'a': 1,
'b': 1}, {'a': 2,
'b': 3}]},
'max': {'df': [{'a': 1,
'b': 2}, {'a': 2,
'b': 4}]},
'nth': {'df': [{'a': 1,
'b': 2}, {'a': 2,
'b': 4}],
'args': [1]},
'count': {'df': [{'a': 1,
'b': 2}, {'a': 2,
'b': 2}],
'out_type': 'int64'}}
for dtype in dtypes:
df_in = df.copy()
df_in['b'] = df_in.b.astype(dtype)
for method, data in compat.iteritems(grp_exp):
if 'args' not in data:
data['args'] = []
if 'out_type' in data:
out_type = data['out_type']
else:
out_type = dtype
exp = data['df']
df_out = pd.DataFrame(exp)
df_out['b'] = df_out.b.astype(out_type)
df_out.set_index('a', inplace=True)
grpd = df_in.groupby('a')
t = getattr(grpd, method)(*data['args'])
assert_frame_equal(t, df_out)
def test_groupby_non_arithmetic_agg_intlike_precision(self):
# GH9311, GH6620
c = 24650000000000000
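        # c lies above 2**53, where float64 can no longer represent every
        # integer exactly, so these aggregations must not round-trip
        # through float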
inputs = ((Timestamp('2011-01-15 12:50:28.502376'),
Timestamp('2011-01-20 12:50:28.593448')), (1 + c, 2 + c))
for i in inputs:
df = pd.DataFrame([{'a': 1, 'b': i[0]}, {'a': 1, 'b': i[1]}])
grp_exp = {'first': {'expected': i[0]},
'last': {'expected': i[1]},
'min': {'expected': i[0]},
'max': {'expected': i[1]},
'nth': {'expected': i[1],
'args': [1]},
'count': {'expected': 2}}
for method, data in compat.iteritems(grp_exp):
if 'args' not in data:
data['args'] = []
grpd = df.groupby('a')
res = getattr(grpd, method)(*data['args'])
self.assertEqual(res.iloc[0].b, data['expected'])
def test_groupby_first_datetime64(self):
df = DataFrame([(1, 1351036800000000000), (2, 1351036800000000000)])
df[1] = df[1].view('M8[ns]')
self.assertTrue(issubclass(df[1].dtype.type, np.datetime64))
result = df.groupby(level=0).first()
got_dt = result[1].dtype
self.assertTrue(issubclass(got_dt.type, np.datetime64))
result = df[1].groupby(level=0).first()
got_dt = result.dtype
self.assertTrue(issubclass(got_dt.type, np.datetime64))
def test_groupby_max_datetime64(self):
# GH 5869
# datetimelike dtype conversion from int
df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
expected = df.groupby('A')['A'].apply(lambda x: x.max())
result = df.groupby('A')['A'].max()
assert_series_equal(result, expected)
def test_groupby_datetime64_32_bit(self):
# GH 6410 / numpy 4328
# 32-bit under 1.9-dev indexing issue
df = DataFrame({"A": range(2), "B": [pd.Timestamp('2000-01-1')] * 2})
result = df.groupby("A")["B"].transform(min)
expected = Series([pd.Timestamp('2000-01-1')] * 2)
assert_series_equal(result, expected)
def test_groupby_categorical_unequal_len(self):
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
        # the error is only raised with a Categorical grouper, not with a
        # Series of dtype category
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
self.assertRaises(ValueError, lambda: series.groupby(bins).mean())
def test_groupby_multiindex_missing_pair(self):
# GH9049
df = DataFrame({'group1': ['a', 'a', 'a', 'b'],
'group2': ['c', 'c', 'd', 'c'],
'value': [1, 1, 1, 5]})
df = df.set_index(['group1', 'group2'])
df_grouped = df.groupby(level=['group1', 'group2'], sort=True)
res = df_grouped.agg('sum')
idx = MultiIndex.from_tuples(
[('a', 'c'), ('a', 'd'), ('b', 'c')], names=['group1', 'group2'])
exp = DataFrame([[2], [1], [5]], index=idx, columns=['value'])
tm.assert_frame_equal(res, exp)
def test_groupby_multiindex_not_lexsorted(self):
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
self.assertTrue(lexsorted_df.columns.is_lexsorted())
# define the non-lexsorted version
not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3],
[1, 'b2', 'c2', 4]])
not_lexsorted_df = not_lexsorted_df.pivot_table(
index='a', columns=['b', 'c'], values='d')
not_lexsorted_df = not_lexsorted_df.reset_index()
self.assertFalse(not_lexsorted_df.columns.is_lexsorted())
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.groupby('a').mean()
with tm.assert_produces_warning(com.PerformanceWarning):
result = not_lexsorted_df.groupby('a').mean()
tm.assert_frame_equal(expected, result)
def test_groupby_levels_and_columns(self):
# GH9344, GH9049
idx_names = ['x', 'y']
idx = pd.MultiIndex.from_tuples(
[(1, 1), (1, 2), (3, 4), (5, 6)], names=idx_names)
df = pd.DataFrame(np.arange(12).reshape(-1, 3), index=idx)
by_levels = df.groupby(level=idx_names).mean()
# reset_index changes columns dtype to object
by_columns = df.reset_index().groupby(idx_names).mean()
tm.assert_frame_equal(by_levels, by_columns, check_column_type=False)
by_columns.columns = pd.Index(by_columns.columns, dtype=np.int64)
tm.assert_frame_equal(by_levels, by_columns)
def test_gb_apply_list_of_unequal_len_arrays(self):
# GH1738
df = DataFrame({'group1': ['a', 'a', 'a', 'b', 'b', 'b', 'a', 'a', 'a',
'b', 'b', 'b'],
'group2': ['c', 'c', 'd', 'd', 'd', 'e', 'c', 'c', 'd',
'd', 'd', 'e'],
'weight': [1.1, 2, 3, 4, 5, 6, 2, 4, 6, 8, 1, 2],
'value': [7.1, 8, 9, 10, 11, 12, 8, 7, 6, 5, 4, 3]})
df = df.set_index(['group1', 'group2'])
df_grouped = df.groupby(level=['group1', 'group2'], sort=True)
def noddy(value, weight):
out = np.array(value * weight).repeat(3)
return out
        # the kernel function returns arrays of unequal length; pandas sniffs
        # the first one, sees it's an array and not a list, assumes the rest
        # are of equal length and so tries a vstack
        # make sure this doesn't die
df_grouped.apply(lambda x: noddy(x.value, x.weight))
def test_groupby_with_empty(self):
index = pd.DatetimeIndex(())
data = ()
series = pd.Series(data, index)
grouper = pd.tseries.resample.TimeGrouper('D')
grouped = series.groupby(grouper)
assert next(iter(grouped), None) is None
def test_aaa_groupby_with_small_elem(self):
# GH 8542
# length=2
df = pd.DataFrame({'event': ['start', 'start'],
'change': [1234, 5678]},
index=pd.DatetimeIndex(['2014-09-10', '2013-10-10']))
grouped = df.groupby([pd.TimeGrouper(freq='M'), 'event'])
self.assertEqual(len(grouped.groups), 2)
self.assertEqual(grouped.ngroups, 2)
self.assertIn((pd.Timestamp('2014-09-30'), 'start'), grouped.groups)
self.assertIn((pd.Timestamp('2013-10-31'), 'start'), grouped.groups)
res = grouped.get_group((pd.Timestamp('2014-09-30'), 'start'))
tm.assert_frame_equal(res, df.iloc[[0], :])
res = grouped.get_group((pd.Timestamp('2013-10-31'), 'start'))
tm.assert_frame_equal(res, df.iloc[[1], :])
df = pd.DataFrame({'event': ['start', 'start', 'start'],
'change': [1234, 5678, 9123]},
index=pd.DatetimeIndex(['2014-09-10', '2013-10-10',
'2014-09-15']))
grouped = df.groupby([pd.TimeGrouper(freq='M'), 'event'])
self.assertEqual(len(grouped.groups), 2)
self.assertEqual(grouped.ngroups, 2)
self.assertIn((pd.Timestamp('2014-09-30'), 'start'), grouped.groups)
self.assertIn((pd.Timestamp('2013-10-31'), 'start'), grouped.groups)
res = grouped.get_group((pd.Timestamp('2014-09-30'), 'start'))
tm.assert_frame_equal(res, df.iloc[[0, 2], :])
res = grouped.get_group((pd.Timestamp('2013-10-31'), 'start'))
tm.assert_frame_equal(res, df.iloc[[1], :])
# length=3
df = pd.DataFrame({'event': ['start', 'start', 'start'],
'change': [1234, 5678, 9123]},
index=pd.DatetimeIndex(['2014-09-10', '2013-10-10',
'2014-08-05']))
grouped = df.groupby([pd.TimeGrouper(freq='M'), 'event'])
self.assertEqual(len(grouped.groups), 3)
self.assertEqual(grouped.ngroups, 3)
self.assertIn((pd.Timestamp('2014-09-30'), 'start'), grouped.groups)
self.assertIn((pd.Timestamp('2013-10-31'), 'start'), grouped.groups)
self.assertIn((pd.Timestamp('2014-08-31'), 'start'), grouped.groups)
res = grouped.get_group((pd.Timestamp('2014-09-30'), 'start'))
tm.assert_frame_equal(res, df.iloc[[0], :])
res = grouped.get_group((pd.Timestamp('2013-10-31'), 'start'))
tm.assert_frame_equal(res, df.iloc[[1], :])
res = grouped.get_group((pd.Timestamp('2014-08-31'), 'start'))
tm.assert_frame_equal(res, df.iloc[[2], :])
def test_groupby_with_timezone_selection(self):
# GH 11616
# Test that column selection returns output in correct timezone.
np.random.seed(42)
df = pd.DataFrame({
'factor': np.random.randint(0, 3, size=60),
'time': pd.date_range('01/01/2000 00:00', periods=60,
freq='s', tz='UTC')
})
df1 = df.groupby('factor').max()['time']
df2 = df.groupby('factor')['time'].max()
tm.assert_series_equal(df1, df2)
def test_timezone_info(self):
# GH 11682
# Timezone info lost when broadcasting scalar datetime to DataFrame
tm._skip_if_no_pytz()
import pytz
df = pd.DataFrame({'a': [1], 'b': [datetime.now(pytz.utc)]})
tm.assert_equal(df['b'][0].tzinfo, pytz.utc)
df = pd.DataFrame({'a': [1, 2, 3]})
df['b'] = datetime.now(pytz.utc)
tm.assert_equal(df['b'][0].tzinfo, pytz.utc)
def test_groupby_with_timegrouper(self):
# GH 4161
# TimeGrouper requires a sorted index
# also verifies that the resultant index has the correct name
import datetime as DT
df_original = DataFrame({
'Buyer': 'Carl Carl Carl Carl Joe Carl'.split(),
'Quantity': [18, 3, 5, 1, 9, 3],
'Date': [
DT.datetime(2013, 9, 1, 13, 0),
DT.datetime(2013, 9, 1, 13, 5),
DT.datetime(2013, 10, 1, 20, 0),
DT.datetime(2013, 10, 3, 10, 0),
DT.datetime(2013, 12, 2, 12, 0),
DT.datetime(2013, 9, 2, 14, 0),
]
})
# GH 6908 change target column's order
df_reordered = df_original.sort_values(by='Quantity')
for df in [df_original, df_reordered]:
df = df.set_index(['Date'])
expected = DataFrame(
{'Quantity': np.nan},
index=date_range('20130901 13:00:00',
'20131205 13:00:00', freq='5D',
name='Date', closed='left'))
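            # only three 5-day bins contain data: 24 (= 18 + 3 + 3),
            # 6 (= 5 + 1) and 9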
expected.iloc[[0, 6, 18], 0] = np.array(
[24., 6., 9.], dtype='float64')
            result1 = df.resample('5D').sum()
assert_frame_equal(result1, expected)
df_sorted = df.sort_index()
result2 = df_sorted.groupby(pd.TimeGrouper(freq='5D')).sum()
assert_frame_equal(result2, expected)
result3 = df.groupby(pd.TimeGrouper(freq='5D')).sum()
assert_frame_equal(result3, expected)
def test_groupby_with_timegrouper_methods(self):
# GH 3881
        # make sure the TimeGrouper API conforms
import datetime as DT
df_original = pd.DataFrame({
'Branch': 'A A A A A B'.split(),
'Buyer': 'Carl Mark Carl Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 8, 9, 3],
'Date': [
DT.datetime(2013, 1, 1, 13, 0),
DT.datetime(2013, 1, 1, 13, 5),
DT.datetime(2013, 10, 1, 20, 0),
DT.datetime(2013, 10, 2, 10, 0),
DT.datetime(2013, 12, 2, 12, 0),
DT.datetime(2013, 12, 2, 14, 0),
]
})
df_sorted = df_original.sort_values(by='Quantity', ascending=False)
for df in [df_original, df_sorted]:
df = df.set_index('Date', drop=False)
g = df.groupby(pd.TimeGrouper('6M'))
self.assertTrue(g.group_keys)
self.assertTrue(isinstance(g.grouper, pd.core.groupby.BinGrouper))
groups = g.groups
self.assertTrue(isinstance(groups, dict))
self.assertTrue(len(groups) == 3)
def test_timegrouper_with_reg_groups(self):
# GH 3794
        # allow combination of a TimeGrouper with regular groups
import datetime as DT
df_original = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [
DT.datetime(2013, 1, 1, 13, 0),
DT.datetime(2013, 1, 1, 13, 5),
DT.datetime(2013, 10, 1, 20, 0),
DT.datetime(2013, 10, 2, 10, 0),
DT.datetime(2013, 10, 1, 20, 0),
DT.datetime(2013, 10, 2, 10, 0),
DT.datetime(2013, 12, 2, 12, 0),
DT.datetime(2013, 12, 2, 14, 0),
]
}).set_index('Date')
df_sorted = df_original.sort_values(by='Quantity', ascending=False)
for df in [df_original, df_sorted]:
expected = DataFrame({
'Buyer': 'Carl Joe Mark'.split(),
'Quantity': [10, 18, 3],
'Date': [
DT.datetime(2013, 12, 31, 0, 0),
DT.datetime(2013, 12, 31, 0, 0),
DT.datetime(2013, 12, 31, 0, 0),
]
}).set_index(['Date', 'Buyer'])
result = df.groupby([pd.Grouper(freq='A'), 'Buyer']).sum()
assert_frame_equal(result, expected)
expected = DataFrame({
'Buyer': 'Carl Mark Carl Joe'.split(),
'Quantity': [1, 3, 9, 18],
'Date': [
DT.datetime(2013, 1, 1, 0, 0),
DT.datetime(2013, 1, 1, 0, 0),
DT.datetime(2013, 7, 1, 0, 0),
DT.datetime(2013, 7, 1, 0, 0),
]
}).set_index(['Date', 'Buyer'])
result = df.groupby([pd.Grouper(freq='6MS'), 'Buyer']).sum()
assert_frame_equal(result, expected)
df_original = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [
DT.datetime(2013, 10, 1, 13, 0),
DT.datetime(2013, 10, 1, 13, 5),
DT.datetime(2013, 10, 1, 20, 0),
DT.datetime(2013, 10, 2, 10, 0),
DT.datetime(2013, 10, 1, 20, 0),
DT.datetime(2013, 10, 2, 10, 0),
DT.datetime(2013, 10, 2, 12, 0),
DT.datetime(2013, 10, 2, 14, 0),
]
}).set_index('Date')
df_sorted = df_original.sort_values(by='Quantity', ascending=False)
for df in [df_original, df_sorted]:
expected = DataFrame({
'Buyer': 'Carl Joe Mark Carl Joe'.split(),
'Quantity': [6, 8, 3, 4, 10],
'Date': [
DT.datetime(2013, 10, 1, 0, 0),
DT.datetime(2013, 10, 1, 0, 0),
DT.datetime(2013, 10, 1, 0, 0),
DT.datetime(2013, 10, 2, 0, 0),
DT.datetime(2013, 10, 2, 0, 0),
]
}).set_index(['Date', 'Buyer'])
result = df.groupby([pd.Grouper(freq='1D'), 'Buyer']).sum()
assert_frame_equal(result, expected)
result = df.groupby([pd.Grouper(freq='1M'), 'Buyer']).sum()
expected = DataFrame({
'Buyer': 'Carl Joe Mark'.split(),
'Quantity': [10, 18, 3],
'Date': [
DT.datetime(2013, 10, 31, 0, 0),
DT.datetime(2013, 10, 31, 0, 0),
DT.datetime(2013, 10, 31, 0, 0),
]
}).set_index(['Date', 'Buyer'])
assert_frame_equal(result, expected)
# passing the name
df = df.reset_index()
result = df.groupby([pd.Grouper(freq='1M', key='Date'), 'Buyer'
]).sum()
assert_frame_equal(result, expected)
with self.assertRaises(KeyError):
df.groupby([pd.Grouper(freq='1M', key='foo'), 'Buyer']).sum()
# passing the level
df = df.set_index('Date')
result = df.groupby([pd.Grouper(freq='1M', level='Date'), 'Buyer'
]).sum()
assert_frame_equal(result, expected)
result = df.groupby([pd.Grouper(freq='1M', level=0), 'Buyer']).sum(
)
assert_frame_equal(result, expected)
with self.assertRaises(ValueError):
df.groupby([pd.Grouper(freq='1M', level='foo'),
'Buyer']).sum()
# multi names
df = df.copy()
df['Date'] = df.index + pd.offsets.MonthEnd(2)
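            # the 'Date' column now lies two month-ends ahead of the index,
            # so grouping on the column buckets everything into 2013-11-30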
result = df.groupby([pd.Grouper(freq='1M', key='Date'), 'Buyer'
]).sum()
expected = DataFrame({
'Buyer': 'Carl Joe Mark'.split(),
'Quantity': [10, 18, 3],
'Date': [
DT.datetime(2013, 11, 30, 0, 0),
DT.datetime(2013, 11, 30, 0, 0),
DT.datetime(2013, 11, 30, 0, 0),
]
}).set_index(['Date', 'Buyer'])
assert_frame_equal(result, expected)
# error as we have both a level and a name!
with self.assertRaises(ValueError):
df.groupby([pd.Grouper(freq='1M', key='Date',
level='Date'), 'Buyer']).sum()
# single groupers
expected = DataFrame({'Quantity': [31],
'Date': [DT.datetime(2013, 10, 31, 0, 0)
]}).set_index('Date')
result = df.groupby(pd.Grouper(freq='1M')).sum()
assert_frame_equal(result, expected)
result = df.groupby([pd.Grouper(freq='1M')]).sum()
assert_frame_equal(result, expected)
expected = DataFrame({'Quantity': [31],
'Date': [DT.datetime(2013, 11, 30, 0, 0)
]}).set_index('Date')
result = df.groupby(pd.Grouper(freq='1M', key='Date')).sum()
assert_frame_equal(result, expected)
result = df.groupby([pd.Grouper(freq='1M', key='Date')]).sum()
assert_frame_equal(result, expected)
# GH 6764 multiple grouping with/without sort
df = DataFrame({
'date': pd.to_datetime([
'20121002', '20121007', '20130130', '20130202', '20130305',
'20121002', '20121207', '20130130', '20130202', '20130305',
'20130202', '20130305'
]),
'user_id': [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
'whole_cost': [1790, 364, 280, 259, 201, 623, 90, 312, 359, 301,
359, 801],
'cost1': [12, 15, 10, 24, 39, 1, 0, 90, 45, 34, 1, 12]
}).set_index('date')
for freq in ['D', 'M', 'A', 'Q-APR']:
expected = df.groupby('user_id')[
'whole_cost'].resample(
freq).sum().dropna().reorder_levels(
['date', 'user_id']).sortlevel().astype('int64')
expected.name = 'whole_cost'
result1 = df.sort_index().groupby([pd.TimeGrouper(freq=freq),
'user_id'])['whole_cost'].sum()
assert_series_equal(result1, expected)
result2 = df.groupby([pd.TimeGrouper(freq=freq), 'user_id'])[
'whole_cost'].sum()
assert_series_equal(result2, expected)
def test_timegrouper_get_group(self):
# GH 6914
df_original = DataFrame({
'Buyer': 'Carl Joe Joe Carl Joe Carl'.split(),
'Quantity': [18, 3, 5, 1, 9, 3],
'Date': [datetime(2013, 9, 1, 13, 0),
datetime(2013, 9, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 3, 10, 0),
datetime(2013, 12, 2, 12, 0),
datetime(2013, 9, 2, 14, 0), ]
})
df_reordered = df_original.sort_values(by='Quantity')
# single grouping
expected_list = [df_original.iloc[[0, 1, 5]], df_original.iloc[[2, 3]],
df_original.iloc[[4]]]
dt_list = ['2013-09-30', '2013-10-31', '2013-12-31']
for df in [df_original, df_reordered]:
grouped = df.groupby(pd.Grouper(freq='M', key='Date'))
for t, expected in zip(dt_list, expected_list):
dt = pd.Timestamp(t)
result = grouped.get_group(dt)
assert_frame_equal(result, expected)
# multiple grouping
expected_list = [df_original.iloc[[1]], df_original.iloc[[3]],
df_original.iloc[[4]]]
g_list = [('Joe', '2013-09-30'), ('Carl', '2013-10-31'),
('Joe', '2013-12-31')]
for df in [df_original, df_reordered]:
grouped = df.groupby(['Buyer', pd.Grouper(freq='M', key='Date')])
for (b, t), expected in zip(g_list, expected_list):
dt = pd.Timestamp(t)
result = grouped.get_group((b, dt))
assert_frame_equal(result, expected)
# with index
df_original = df_original.set_index('Date')
df_reordered = df_original.sort_values(by='Quantity')
expected_list = [df_original.iloc[[0, 1, 5]], df_original.iloc[[2, 3]],
df_original.iloc[[4]]]
for df in [df_original, df_reordered]:
grouped = df.groupby(pd.Grouper(freq='M'))
for t, expected in zip(dt_list, expected_list):
dt = pd.Timestamp(t)
result = grouped.get_group(dt)
assert_frame_equal(result, expected)
def test_timegrouper_apply_return_type_series(self):
# Using `apply` with the `TimeGrouper` should give the
# same return type as an `apply` with a `Grouper`.
# Issue #11742
df = pd.DataFrame({'date': ['10/10/2000', '11/10/2000'],
'value': [10, 13]})
df_dt = df.copy()
df_dt['date'] = pd.to_datetime(df_dt['date'])
def sumfunc_series(x):
return pd.Series([x['value'].sum()], ('sum',))
expected = df.groupby(pd.Grouper(key='date')).apply(sumfunc_series)
result = (df_dt.groupby(pd.TimeGrouper(freq='M', key='date'))
.apply(sumfunc_series))
assert_frame_equal(result.reset_index(drop=True),
expected.reset_index(drop=True))
def test_timegrouper_apply_return_type_value(self):
# Using `apply` with the `TimeGrouper` should give the
# same return type as an `apply` with a `Grouper`.
# Issue #11742
df = pd.DataFrame({'date': ['10/10/2000', '11/10/2000'],
'value': [10, 13]})
df_dt = df.copy()
df_dt['date'] = pd.to_datetime(df_dt['date'])
def sumfunc_value(x):
return x.value.sum()
expected = df.groupby(pd.Grouper(key='date')).apply(sumfunc_value)
result = (df_dt.groupby(pd.TimeGrouper(freq='M', key='date'))
.apply(sumfunc_value))
assert_series_equal(result.reset_index(drop=True),
expected.reset_index(drop=True))
def test_cumcount(self):
df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'])
g = df.groupby('A')
sg = g.A
expected = Series([0, 1, 2, 0, 3])
assert_series_equal(expected, g.cumcount())
assert_series_equal(expected, sg.cumcount())
def test_cumcount_empty(self):
ge = DataFrame().groupby(level=0)
se = Series().groupby(level=0)
e = Series(dtype='int64'
) # edge case, as this is usually considered float
assert_series_equal(e, ge.cumcount())
assert_series_equal(e, se.cumcount())
def test_cumcount_dupe_index(self):
df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'],
index=[0] * 5)
g = df.groupby('A')
sg = g.A
expected = Series([0, 1, 2, 0, 3], index=[0] * 5)
assert_series_equal(expected, g.cumcount())
assert_series_equal(expected, sg.cumcount())
def test_cumcount_mi(self):
mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'],
index=mi)
g = df.groupby('A')
sg = g.A
expected = Series([0, 1, 2, 0, 3], index=mi)
assert_series_equal(expected, g.cumcount())
assert_series_equal(expected, sg.cumcount())
def test_cumcount_groupby_not_col(self):
df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'],
index=[0] * 5)
g = df.groupby([0, 0, 0, 1, 0])
sg = g.A
expected = Series([0, 1, 2, 0, 3], index=[0] * 5)
assert_series_equal(expected, g.cumcount())
assert_series_equal(expected, sg.cumcount())
def test_filter_series(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
expected_odd = pd.Series([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = pd.Series([20, 22, 24], index=[2, 4, 5])
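        # group by parity: the odd values (1, 3, 5, 7) have mean < 10, the
        # even values (20, 22, 24) have mean > 10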
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
assert_series_equal(
grouped.filter(lambda x: x.mean() < 10), expected_odd)
assert_series_equal(
grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
assert_series_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(s.index))
assert_series_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(s.index))
def test_filter_single_column_df(self):
df = pd.DataFrame([1, 3, 20, 5, 22, 24, 7])
expected_odd = pd.DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = pd.DataFrame([20, 22, 24], index=[2, 4, 5])
grouper = df[0].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
assert_frame_equal(
grouped.filter(lambda x: x.mean() < 10), expected_odd)
assert_frame_equal(
grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
assert_frame_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(df.index))
assert_frame_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(df.index))
def test_filter_multi_column_df(self):
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': [1, 1, 1, 1]})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = pd.DataFrame({'A': [12, 12], 'B': [1, 1]}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() - x['B'].sum() > 10),
expected)
def test_filter_mixed_df(self):
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = pd.DataFrame({'A': [12, 12], 'B': ['b', 'c']}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() > 10), expected)
def test_filter_out_all_groups(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
assert_series_equal(grouped.filter(lambda x: x.mean() > 1000), s[[]])
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() > 1000), df.ix[[]])
def test_filter_out_no_groups(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
filtered = grouped.filter(lambda x: x.mean() > 0)
assert_series_equal(filtered, s)
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
filtered = grouped.filter(lambda x: x['A'].mean() > 0)
assert_frame_equal(filtered, df)
def test_filter_out_all_groups_in_df(self):
# GH12768
df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = df.groupby('a')
res = res.filter(lambda x: x['b'].sum() > 5, dropna=False)
expected = pd.DataFrame({'a': [nan] * 3, 'b': [nan] * 3})
assert_frame_equal(expected, res)
df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = df.groupby('a')
res = res.filter(lambda x: x['b'].sum() > 5, dropna=True)
expected = pd.DataFrame({'a': [], 'b': []}, dtype="int64")
assert_frame_equal(expected, res)
def test_filter_condition_raises(self):
def raise_if_sum_is_zero(x):
if x.sum() == 0:
raise ValueError
else:
return x.sum() > 0
s = pd.Series([-1, 0, 1, 2])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
self.assertRaises(TypeError,
lambda: grouped.filter(raise_if_sum_is_zero))
def test_filter_with_axis_in_groupby(self):
# issue 11041
index = pd.MultiIndex.from_product([range(10), [0, 1]])
data = pd.DataFrame(
np.arange(100).reshape(-1, 20), columns=index, dtype='int64')
result = data.groupby(level=0,
axis=1).filter(lambda x: x.iloc[0, 0] > 10)
expected = data.iloc[:, 12:20]
assert_frame_equal(result, expected)
def test_filter_bad_shapes(self):
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = df['B']
g_df = df.groupby('B')
g_s = s.groupby(s)
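        # a filter function must return a scalar boolean per group; identity,
        # elementwise and outer-product return values should all raise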
f = lambda x: x
self.assertRaises(TypeError, lambda: g_df.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
f = lambda x: x == 1
self.assertRaises(TypeError, lambda: g_df.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
f = lambda x: np.outer(x, x)
self.assertRaises(TypeError, lambda: g_df.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
def test_filter_nan_is_false(self):
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = df['B']
g_df = df.groupby(df['B'])
g_s = s.groupby(s)
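        # a NaN return value is treated as False, so every group is dropped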
f = lambda x: np.nan
assert_frame_equal(g_df.filter(f), df.loc[[]])
assert_series_equal(g_s.filter(f), s[[]])
def test_filter_against_workaround(self):
np.random.seed(0)
# Series of ints
s = Series(np.random.randint(0, 100, 1000))
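        # bucket values by rounding to the nearest multiple of 10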
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype('bool')]
new_way = grouped.filter(f)
assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Series of floats
s = 100 * Series(np.random.random(1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype('bool')]
new_way = grouped.filter(f)
assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Set up DataFrame of ints, floats, strings.
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 1000
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame({'ints': Series(np.random.randint(0, 100, N)),
'floats': N / 10 * Series(np.random.random(N)),
'letters': Series(random_letters)})
# Group by ints; filter on floats.
grouped = df.groupby('ints')
old_way = df[grouped.floats.
transform(lambda x: x.mean() > N / 20).astype('bool')]
new_way = grouped.filter(lambda x: x['floats'].mean() > N / 20)
assert_frame_equal(new_way, old_way)
# Group by floats (rounded); filter on strings.
grouper = df.floats.apply(lambda x: np.round(x, -1))
grouped = df.groupby(grouper)
old_way = df[grouped.letters.
transform(lambda x: len(x) < N / 10).astype('bool')]
new_way = grouped.filter(lambda x: len(x.letters) < N / 10)
assert_frame_equal(new_way, old_way)
# Group by strings; filter on ints.
grouped = df.groupby('letters')
old_way = df[grouped.ints.
transform(lambda x: x.mean() > N / 20).astype('bool')]
new_way = grouped.filter(lambda x: x['ints'].mean() > N / 20)
assert_frame_equal(new_way, old_way)
def test_filter_using_len(self):
# BUG GH4447
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
grouped = df.groupby('B')
actual = grouped.filter(lambda x: len(x) > 2)
expected = DataFrame(
{'A': np.arange(2, 6),
'B': list('bbbb'),
'C': np.arange(2, 6)}, index=np.arange(2, 6))
assert_frame_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = df.ix[[]]
assert_frame_equal(actual, expected)
        # Series has always worked properly, but we'll test anyway.
s = df['B']
grouped = s.groupby(s)
actual = grouped.filter(lambda x: len(x) > 2)
expected = Series(4 * ['b'], index=np.arange(2, 6), name='B')
assert_series_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = s[[]]
assert_series_equal(actual, expected)
def test_filter_maintains_ordering(self):
# Simple case: index is sequential. #4621
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]})
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
# Now index is sequentially decreasing.
df.index = np.arange(len(df) - 1, -1, -1)
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
# Index is shuffled.
SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]
df.index = df.index[SHUFFLED]
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
def test_filter_multiple_timestamp(self):
# GH 10114
df = DataFrame({'A': np.arange(5, dtype='int64'),
'B': ['foo', 'bar', 'foo', 'bar', 'bar'],
'C': Timestamp('20130101')})
grouped = df.groupby(['B', 'C'])
result = grouped['A'].filter(lambda x: True)
assert_series_equal(df['A'], result)
result = grouped['A'].transform(len)
expected = Series([2, 3, 2, 3, 3], name='A')
assert_series_equal(result, expected)
result = grouped.filter(lambda x: True)
assert_frame_equal(df, result)
result = grouped.transform('sum')
expected = DataFrame({'A': [2, 8, 2, 8, 8]})
assert_frame_equal(result, expected)
result = grouped.transform(len)
expected = DataFrame({'A': [2, 3, 2, 3, 3]})
assert_frame_equal(result, expected)
def test_filter_and_transform_with_non_unique_int_index(self):
# GH4620
index = [1, 1, 1, 2, 1, 1, 0, 1]
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_multiple_non_unique_int_index(self):
# GH4620
index = [1, 1, 1, 2, 0, 0, 0, 1]
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_float_index(self):
# GH4620
index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float)
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_timestamp_index(self):
# GH4620
t0 = Timestamp('2013-09-30 00:05:00')
t1 = Timestamp('2013-10-30 00:05:00')
t2 = Timestamp('2013-11-30 00:05:00')
index = [t1, t1, t1, t2, t1, t1, t0, t1]
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_string_index(self):
# GH4620
index = list('bbbcbbab')
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_has_access_to_grouped_cols(self):
df = DataFrame([[1, 2], [1, 3], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
# previously didn't have access to col A #????
filt = g.filter(lambda x: x['A'].sum() == 2)
assert_frame_equal(filt, df.iloc[[0, 1]])
def test_filter_enforces_scalarness(self):
df = pd.DataFrame([
['best', 'a', 'x'],
['worst', 'b', 'y'],
['best', 'c', 'x'],
['best', 'd', 'y'],
['worst', 'd', 'y'],
['worst', 'd', 'y'],
['best', 'd', 'z'],
], columns=['a', 'b', 'c'])
with tm.assertRaisesRegexp(TypeError, 'filter function returned a.*'):
df.groupby('c').filter(lambda g: g['a'] == 'best')
def test_filter_non_bool_raises(self):
df = pd.DataFrame([
['best', 'a', 1],
['worst', 'b', 1],
['best', 'c', 1],
['best', 'd', 1],
['worst', 'd', 1],
['worst', 'd', 1],
['best', 'd', 1],
], columns=['a', 'b', 'c'])
with tm.assertRaisesRegexp(TypeError, 'filter function returned a.*'):
df.groupby('a').filter(lambda g: g.c.mean())
    def test_fill_consistency(self):
# GH9221
        # pass-through keyword arguments to the generated wrapper
        # are set only if the passed kw is None
df = DataFrame(index=pd.MultiIndex.from_product(
[['value1', 'value2'], date_range('2014-01-01', '2014-01-06')]),
columns=Index(
['1', '2'], name='id'))
df['1'] = [np.nan, 1, np.nan, np.nan, 11, np.nan, np.nan, 2, np.nan,
np.nan, 22, np.nan]
df['2'] = [np.nan, 3, np.nan, np.nan, 33, np.nan, np.nan, 4, np.nan,
np.nan, 44, np.nan]
expected = df.groupby(level=0, axis=0).fillna(method='ffill')
result = df.T.groupby(level=0, axis=1).fillna(method='ffill').T
assert_frame_equal(result, expected)
def test_index_label_overlaps_location(self):
        # checking we don't have any label/location confusion in
        # the wake of GH5375
df = DataFrame(list('ABCDE'), index=[2, 0, 2, 1, 1])
g = df.groupby(list('ababb'))
actual = g.filter(lambda x: len(x) > 2)
expected = df.iloc[[1, 3, 4]]
assert_frame_equal(actual, expected)
ser = df[0]
g = ser.groupby(list('ababb'))
actual = g.filter(lambda x: len(x) > 2)
expected = ser.take([1, 3, 4])
assert_series_equal(actual, expected)
# ... and again, with a generic Index of floats
df.index = df.index.astype(float)
g = df.groupby(list('ababb'))
actual = g.filter(lambda x: len(x) > 2)
expected = df.iloc[[1, 3, 4]]
assert_frame_equal(actual, expected)
ser = df[0]
g = ser.groupby(list('ababb'))
actual = g.filter(lambda x: len(x) > 2)
expected = ser.take([1, 3, 4])
assert_series_equal(actual, expected)
def test_groupby_selection_with_methods(self):
# some methods which require DatetimeIndex
rng = pd.date_range('2014', periods=len(self.df))
self.df.index = rng
g = self.df.groupby(['A'])[['C']]
g_exp = self.df[['C']].groupby(self.df['A'])
# TODO check groupby with > 1 col ?
# methods which are called as .foo()
methods = ['count',
'corr',
'cummax',
'cummin',
'cumprod',
'describe',
'rank',
'quantile',
'diff',
'shift',
'all',
'any',
'idxmin',
'idxmax',
'ffill',
'bfill',
'pct_change',
'tshift']
for m in methods:
res = getattr(g, m)()
exp = getattr(g_exp, m)()
assert_frame_equal(res, exp) # should always be frames!
# methods which aren't just .foo()
assert_frame_equal(g.fillna(0), g_exp.fillna(0))
assert_frame_equal(g.dtypes, g_exp.dtypes)
assert_frame_equal(g.apply(lambda x: x.sum()),
g_exp.apply(lambda x: x.sum()))
assert_frame_equal(g.resample('D').mean(), g_exp.resample('D').mean())
assert_frame_equal(g.resample('D').ohlc(),
g_exp.resample('D').ohlc())
assert_frame_equal(g.filter(lambda x: len(x) == 3),
g_exp.filter(lambda x: len(x) == 3))
def test_groupby_whitelist(self):
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 10
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame({'floats': N / 10 * Series(np.random.random(N)),
'letters': Series(random_letters)})
s = df.floats
df_whitelist = frozenset([
'last',
'first',
'mean',
'sum',
'min',
'max',
'head',
'tail',
'cumsum',
'cumprod',
'cummin',
'cummax',
'cumcount',
'resample',
'describe',
'rank',
'quantile',
'fillna',
'mad',
'any',
'all',
'take',
'idxmax',
'idxmin',
'shift',
'tshift',
'ffill',
'bfill',
'pct_change',
'skew',
'plot',
'boxplot',
'hist',
'median',
'dtypes',
'corrwith',
'corr',
'cov',
'diff',
])
s_whitelist = frozenset([
'last',
'first',
'mean',
'sum',
'min',
'max',
'head',
'tail',
'cumsum',
'cumprod',
'cummin',
'cummax',
'cumcount',
'resample',
'describe',
'rank',
'quantile',
'fillna',
'mad',
'any',
'all',
'take',
'idxmax',
'idxmin',
'shift',
'tshift',
'ffill',
'bfill',
'pct_change',
'skew',
'plot',
'hist',
'median',
'dtype',
'corr',
'cov',
'diff',
'unique',
# 'nlargest', 'nsmallest',
])
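        # the groupby object must expose exactly its whitelist via
        # _apply_whitelist, and every whitelisted name must resolve as an
        # attribute of the groupby class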
for obj, whitelist in zip((df, s), (df_whitelist, s_whitelist)):
gb = obj.groupby(df.letters)
self.assertEqual(whitelist, gb._apply_whitelist)
for m in whitelist:
getattr(type(gb), m)
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
'mad', 'std', 'var', 'sem']
AGG_FUNCTIONS_WITH_SKIPNA = ['skew', 'mad']
def test_groupby_whitelist_deprecations(self):
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 10
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame({'floats': N / 10 * Series(np.random.random(N)),
'letters': Series(random_letters)})
# 10711 deprecated
with tm.assert_produces_warning(FutureWarning):
df.groupby('letters').irow(0)
with tm.assert_produces_warning(FutureWarning):
df.groupby('letters').floats.irow(0)
def test_regression_whitelist_methods(self):
# GH6944
        # explicitly test the whitelisted methods
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
raw_frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
raw_frame.ix[1, [1, 2]] = np.nan
raw_frame.ix[7, [0, 1]] = np.nan
for op, level, axis, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2), lrange(2),
[True, False]):
if axis == 0:
frame = raw_frame
else:
frame = raw_frame.T
if op in self.AGG_FUNCTIONS_WITH_SKIPNA:
grouped = frame.groupby(level=level, axis=axis)
result = getattr(grouped, op)(skipna=skipna)
expected = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
assert_frame_equal(result, expected)
else:
grouped = frame.groupby(level=level, axis=axis)
result = getattr(grouped, op)()
expected = getattr(frame, op)(level=level, axis=axis)
assert_frame_equal(result, expected)
def test_groupby_blacklist(self):
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 10
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame({'floats': N / 10 * Series(np.random.random(N)),
'letters': Series(random_letters)})
s = df.floats
blacklist = [
'eval', 'query', 'abs', 'where',
'mask', 'align', 'groupby', 'clip', 'astype',
'at', 'combine', 'consolidate', 'convert_objects',
]
to_methods = [method for method in dir(df) if method.startswith('to_')]
blacklist.extend(to_methods)
# e.g., to_csv
defined_but_not_allowed = ("(?:^Cannot.+{0!r}.+{1!r}.+try using the "
"'apply' method$)")
# e.g., query, eval
not_defined = "(?:^{1!r} object has no attribute {0!r}$)"
fmt = defined_but_not_allowed + '|' + not_defined
for bl in blacklist:
for obj in (df, s):
gb = obj.groupby(df.letters)
msg = fmt.format(bl, type(gb).__name__)
with tm.assertRaisesRegexp(AttributeError, msg):
getattr(gb, bl)
def test_tab_completion(self):
grp = self.mframe.groupby(level='second')
results = set([v for v in dir(grp) if not v.startswith('_')])
expected = set(
['A', 'B', 'C', 'agg', 'aggregate', 'apply', 'boxplot', 'filter',
'first', 'get_group', 'groups', 'hist', 'indices', 'last', 'max',
'mean', 'median', 'min', 'name', 'ngroups', 'nth', 'ohlc', 'plot',
'prod', 'size', 'std', 'sum', 'transform', 'var', 'sem', 'count',
'head', 'irow', 'describe', 'cummax', 'quantile', 'rank',
'cumprod', 'tail', 'resample', 'cummin', 'fillna', 'cumsum',
'cumcount', 'all', 'shift', 'skew', 'bfill', 'ffill', 'take',
'tshift', 'pct_change', 'any', 'mad', 'corr', 'corrwith', 'cov',
'dtypes', 'ndim', 'diff', 'idxmax', 'idxmin',
'ffill', 'bfill', 'pad', 'backfill', 'rolling', 'expanding'])
self.assertEqual(results, expected)
def test_lexsort_indexer(self):
keys = [[nan] * 5 + list(range(100)) + [nan] * 5]
# orders=True, na_position='last'
result = _lexsort_indexer(keys, orders=True, na_position='last')
expected = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
assert_equal(result, expected)
# orders=True, na_position='first'
result = _lexsort_indexer(keys, orders=True, na_position='first')
expected = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
assert_equal(result, expected)
# orders=False, na_position='last'
result = _lexsort_indexer(keys, orders=False, na_position='last')
expected = list(range(104, 4, -1)) + list(range(5)) + list(range(105,
110))
assert_equal(result, expected)
# orders=False, na_position='first'
result = _lexsort_indexer(keys, orders=False, na_position='first')
expected = list(range(5)) + list(range(105, 110)) + list(range(104, 4,
-1))
assert_equal(result, expected)
def test_nargsort(self):
# np.argsort(items) places NaNs last
items = [nan] * 5 + list(range(100)) + [nan] * 5
# np.argsort(items2) may not place NaNs first
items2 = np.array(items, dtype='O')
try:
            # GH 2785; due to a regression in NumPy 1.6.2
np.argsort(np.array([[1, 2], [1, 3], [1, 2]], dtype='i'))
np.argsort(items2, kind='mergesort')
except TypeError:
raise nose.SkipTest('requested sort not available for type')
# mergesort is the most difficult to get right because we want it to be
# stable.
# According to numpy/core/tests/test_multiarray, """The number of
# sorted items must be greater than ~50 to check the actual algorithm
# because quick and merge sort fall over to insertion sort for small
# arrays."""
# mergesort, ascending=True, na_position='last'
result = _nargsort(items, kind='mergesort', ascending=True,
na_position='last')
expected = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
assert_equal(result, expected)
# mergesort, ascending=True, na_position='first'
result = _nargsort(items, kind='mergesort', ascending=True,
na_position='first')
expected = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
assert_equal(result, expected)
# mergesort, ascending=False, na_position='last'
result = _nargsort(items, kind='mergesort', ascending=False,
na_position='last')
expected = list(range(104, 4, -1)) + list(range(5)) + list(range(105,
110))
assert_equal(result, expected)
# mergesort, ascending=False, na_position='first'
result = _nargsort(items, kind='mergesort', ascending=False,
na_position='first')
expected = list(range(5)) + list(range(105, 110)) + list(range(104, 4,
-1))
assert_equal(result, expected)
# mergesort, ascending=True, na_position='last'
result = _nargsort(items2, kind='mergesort', ascending=True,
na_position='last')
expected = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
assert_equal(result, expected)
# mergesort, ascending=True, na_position='first'
result = _nargsort(items2, kind='mergesort', ascending=True,
na_position='first')
expected = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
assert_equal(result, expected)
# mergesort, ascending=False, na_position='last'
result = _nargsort(items2, kind='mergesort', ascending=False,
na_position='last')
expected = list(range(104, 4, -1)) + list(range(5)) + list(range(105,
110))
assert_equal(result, expected)
# mergesort, ascending=False, na_position='first'
result = _nargsort(items2, kind='mergesort', ascending=False,
na_position='first')
expected = list(range(5)) + list(range(105, 110)) + list(range(104, 4,
-1))
assert_equal(result, expected)
def test_datetime_count(self):
df = DataFrame({'a': [1, 2, 3] * 2,
'dates': pd.date_range('now', periods=6, freq='T')})
result = df.groupby('a').dates.count()
expected = Series([
2, 2, 2
], index=Index([1, 2, 3], name='a'), name='dates')
tm.assert_series_equal(result, expected)
def test_lower_int_prec_count(self):
        df = DataFrame({'a': np.array([0, 1, 2, 100], np.int8),
                        'b': np.array([1, 2, 3, 6], np.uint32),
                        'c': np.array([4, 5, 6, 8], np.int16),
                        'grp': list('ab' * 2)})
result = df.groupby('grp').count()
expected = DataFrame({'a': [2, 2],
'b': [2, 2],
'c': [2, 2]}, index=pd.Index(list('ab'),
name='grp'))
tm.assert_frame_equal(result, expected)
def test_count_uses_size_on_exception(self):
class RaisingObjectException(Exception):
pass
class RaisingObject(object):
def __init__(self, msg='I will raise inside Cython'):
super(RaisingObject, self).__init__()
self.msg = msg
def __eq__(self, other):
                # __eq__ is invoked inside the Cython count path; raising
                # here forces count() to fall back to size()
raise RaisingObjectException(self.msg)
df = DataFrame({'a': [RaisingObject() for _ in range(4)],
'grp': list('ab' * 2)})
result = df.groupby('grp').count()
expected = DataFrame({'a': [2, 2]}, index=pd.Index(
list('ab'), name='grp'))
tm.assert_frame_equal(result, expected)
def test__cython_agg_general(self):
ops = [('mean', np.mean),
('median', np.median),
('var', np.var),
('add', np.sum),
('prod', np.prod),
('min', np.min),
('max', np.max),
('first', lambda x: x.iloc[0]),
('last', lambda x: x.iloc[-1]), ]
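        # each Cython fast-path aggregation should match the generic agg()
        # result for the equivalent numpy/lambda function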
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
for op, targop in ops:
result = df.groupby(labels)._cython_agg_general(op)
expected = df.groupby(labels).agg(targop)
try:
tm.assert_frame_equal(result, expected)
except BaseException as exc:
exc.args += ('operation: %s' % op, )
raise
def test_cython_group_transform_algos(self):
# GH 4095
dtypes = [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint32,
np.uint64, np.float32, np.float64]
ops = [(pd.algos.group_cumprod_float64, np.cumproduct, [np.float64]),
(pd.algos.group_cumsum, np.cumsum, dtypes)]
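        # exercise the low-level Cython kernels directly (bypassing groupby)
        # and compare against the equivalent numpy accumulation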
for pd_op, np_op, dtypes in ops:
for dtype in dtypes:
data = np.array([[1], [2], [3], [4]], dtype=dtype)
ans = np.zeros_like(data)
accum = np.array([[0]], dtype=dtype)
labels = np.array([0, 0, 0, 0], dtype=np.int64)
pd_op(ans, data, labels, accum)
self.assert_numpy_array_equal(np_op(data), ans[:, 0])
# with nans
labels = np.array([0, 0, 0, 0, 0], dtype=np.int64)
data = np.array([[1], [2], [3], [np.nan], [4]], dtype='float64')
accum = np.array([[0.0]])
actual = np.zeros_like(data)
actual.fill(np.nan)
pd.algos.group_cumprod_float64(actual, data, labels, accum)
expected = np.array([1, 2, 6, np.nan, 24], dtype='float64')
self.assert_numpy_array_equal(actual[:, 0], expected)
accum = np.array([[0.0]])
actual = np.zeros_like(data)
actual.fill(np.nan)
pd.algos.group_cumsum(actual, data, labels, accum)
expected = np.array([1, 3, 6, np.nan, 10], dtype='float64')
self.assert_numpy_array_equal(actual[:, 0], expected)
# timedelta
data = np.array([np.timedelta64(1, 'ns')] * 5, dtype='m8[ns]')[:, None]
accum = np.array([[0]], dtype='int64')
actual = np.zeros_like(data, dtype='int64')
pd.algos.group_cumsum(actual, data.view('int64'), labels, accum)
expected = np.array([np.timedelta64(1, 'ns'), np.timedelta64(
2, 'ns'), np.timedelta64(3, 'ns'), np.timedelta64(4, 'ns'),
np.timedelta64(5, 'ns')])
self.assert_numpy_array_equal(actual[:, 0].view('m8[ns]'), expected)
def test_cython_transform(self):
# GH 4095
        ops = [(('cumprod', ()), lambda x: x.cumprod()),
               (('cumsum', ()), lambda x: x.cumsum()),
               (('shift', (-1, )), lambda x: x.shift(-1)),
               (('shift', (1, )), lambda x: x.shift())]
s = Series(np.random.randn(1000))
s_missing = s.copy()
s_missing.iloc[2:10] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
# series
for (op, args), targop in ops:
for data in [s, s_missing]:
# print(data.head())
expected = data.groupby(labels).transform(targop)
tm.assert_series_equal(expected,
data.groupby(labels).transform(op,
*args))
tm.assert_series_equal(expected, getattr(
data.groupby(labels), op)(*args))
strings = list('qwertyuiopasdfghjklz')
strings_missing = strings[:]
strings_missing[5] = np.nan
df = DataFrame({'float': s,
'float_missing': s_missing,
'int': [1, 1, 1, 1, 2] * 200,
'datetime': pd.date_range('1990-1-1', periods=1000),
'timedelta': pd.timedelta_range(1, freq='s',
periods=1000),
'string': strings * 50,
'string_missing': strings_missing * 50})
df['cat'] = df['string'].astype('category')
df2 = df.copy()
df2.index = pd.MultiIndex.from_product([range(100), range(10)])
# DataFrame - Single and MultiIndex,
# group by values, index level, columns
for df in [df, df2]:
for gb_target in [dict(by=labels), dict(level=0), dict(by='string')
]: # dict(by='string_missing')]:
# dict(by=['int','string'])]:
gb = df.groupby(**gb_target)
# whitelisted methods set the selection before applying
                # bit of a hack to make sure the cythonized shift
                # is equivalent to pre-0.17.1 behavior
if op == 'shift':
gb._set_selection_from_grouper()
for (op, args), targop in ops:
if op != 'shift' and 'int' not in gb_target:
                        # numeric apply fastpath promotes dtype so have
                        # to apply separately and concat
i = gb[['int']].apply(targop)
f = gb[['float', 'float_missing']].apply(targop)
expected = pd.concat([f, i], axis=1)
else:
expected = gb.apply(targop)
expected = expected.sort_index(axis=1)
tm.assert_frame_equal(expected,
gb.transform(op, *args).sort_index(
axis=1))
tm.assert_frame_equal(expected, getattr(gb, op)(*args))
# individual columns
for c in df:
if c not in ['float', 'int', 'float_missing'
] and op != 'shift':
self.assertRaises(DataError, gb[c].transform, op)
self.assertRaises(DataError, getattr(gb[c], op))
else:
expected = gb[c].apply(targop)
expected.name = c
tm.assert_series_equal(expected,
gb[c].transform(op, *args))
tm.assert_series_equal(expected,
getattr(gb[c], op)(*args))
def test_groupby_cumprod(self):
# GH 4095
df = pd.DataFrame({'key': ['b'] * 10, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
df = pd.DataFrame({'key': ['b'] * 100, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
# if overflows, groupby product casts to float
# while numpy passes back invalid values
df['value'] = df['value'].astype(float)
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
def test_ops_general(self):
ops = [('mean', np.mean),
('median', np.median),
('std', np.std),
('var', np.var),
('sum', np.sum),
('prod', np.prod),
('min', np.min),
('max', np.max),
('first', lambda x: x.iloc[0]),
('last', lambda x: x.iloc[-1]),
('count', np.size), ]
try:
from scipy.stats import sem
except ImportError:
pass
else:
ops.append(('sem', sem))
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
for op, targop in ops:
result = getattr(df.groupby(labels), op)().astype(float)
expected = df.groupby(labels).agg(targop)
try:
tm.assert_frame_equal(result, expected)
except BaseException as exc:
exc.args += ('operation: %s' % op, )
raise
def test_max_nan_bug(self):
raw = """,Date,app,File
2013-04-23,2013-04-23 00:00:00,,log080001.log
2013-05-06,2013-05-06 00:00:00,,log.log
2013-05-07,2013-05-07 00:00:00,OE,xlsx"""
df = pd.read_csv(StringIO(raw), parse_dates=[0])
gb = df.groupby('Date')
r = gb[['File']].max()
e = gb['File'].max().to_frame()
tm.assert_frame_equal(r, e)
self.assertFalse(r['File'].isnull().any())
def test_nlargest(self):
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nlargest(3)
e = Series([
7, 5, 3, 10, 9, 6
], index=MultiIndex.from_arrays([list('aaabbb'), [3, 2, 1, 9, 5, 8]]))
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series([
3, 2, 1, 3, 3, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [2, 3, 1, 6, 5, 7]]))
assert_series_equal(gb.nlargest(3, keep='last'), e)
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(gb.nlargest(3, take_last=True), e)
def test_nsmallest(self):
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nsmallest(3)
e = Series([
1, 2, 3, 0, 4, 6
], index=MultiIndex.from_arrays([list('aaabbb'), [0, 4, 1, 6, 7, 8]]))
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series([
0, 1, 1, 0, 1, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [4, 1, 0, 9, 8, 7]]))
assert_series_equal(gb.nsmallest(3, keep='last'), e)
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(gb.nsmallest(3, take_last=True), e)
def test_transform_doesnt_clobber_ints(self):
# GH 7972
n = 6
x = np.arange(n)
df = DataFrame({'a': x // 2, 'b': 2.0 * x, 'c': 3.0 * x})
df2 = DataFrame({'a': x // 2 * 1.0, 'b': 2.0 * x, 'c': 3.0 * x})
gb = df.groupby('a')
result = gb.transform('mean')
gb2 = df2.groupby('a')
expected = gb2.transform('mean')
tm.assert_frame_equal(result, expected)
def test_groupby_categorical_two_columns(self):
# https://github.com/pydata/pandas/issues/8138
d = {'cat':
pd.Categorical(["a", "b", "a", "b"], categories=["a", "b", "c"],
ordered=True),
'ints': [1, 1, 2, 2],
'val': [10, 20, 30, 40]}
test = pd.DataFrame(d)
# Grouping on a single column
groups_single_key = test.groupby("cat")
res = groups_single_key.agg('mean')
exp = DataFrame({"ints": [1.5, 1.5, np.nan], "val": [20, 30, np.nan]},
index=pd.CategoricalIndex(["a", "b", "c"], name="cat"))
tm.assert_frame_equal(res, exp)
# Grouping on two columns
groups_double_key = test.groupby(["cat", "ints"])
res = groups_double_key.agg('mean')
exp = DataFrame({"val": [10, 30, 20, 40, np.nan, np.nan],
"cat": ["a", "a", "b", "b", "c", "c"],
"ints": [1, 2, 1, 2, 1, 2]}).set_index(["cat", "ints"
])
tm.assert_frame_equal(res, exp)
# GH 10132
for key in [('a', 1), ('b', 2), ('b', 1), ('a', 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = test[(test.cat == c) & (test.ints == i)]
assert_frame_equal(result, expected)
d = {'C1': [3, 3, 4, 5], 'C2': [1, 2, 3, 4], 'C3': [10, 100, 200, 34]}
test = pd.DataFrame(d)
values = pd.cut(test['C1'], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = test.groupby([values, 'C2'])
res = groups_double_key.agg('mean')
nan = np.nan
idx = MultiIndex.from_product([["(1, 2]", "(2, 3]", "(3, 6]"],
[1, 2, 3, 4]],
names=["cat", "C2"])
exp = DataFrame({"C1": [nan, nan, nan, nan, 3, 3,
nan, nan, nan, nan, 4, 5],
"C3": [nan, nan, nan, nan, 10, 100,
nan, nan, nan, nan, 200, 34]}, index=idx)
tm.assert_frame_equal(res, exp)
def test_groupby_apply_all_none(self):
        # Tests to make sure there are no errors if the apply function
        # returns all None values. Issue 9684.
test_df = DataFrame({'groups': [0, 0, 1, 1],
'random_vars': [8, 7, 4, 5]})
def test_func(x):
pass
result = test_df.groupby('groups').apply(test_func)
expected = DataFrame()
tm.assert_frame_equal(result, expected)
def test_first_last_max_min_on_time_data(self):
# GH 10295
        # Verify that NaT is not in the result of max, min, first and last on
        # a DataFrame with datetime or timedelta values.
from datetime import timedelta as td
df_test = DataFrame(
{'dt': [nan, '2015-07-24 10:10', '2015-07-25 11:11',
'2015-07-23 12:12', nan],
'td': [nan, td(days=1), td(days=2), td(days=3), nan]})
df_test.dt = pd.to_datetime(df_test.dt)
df_test['group'] = 'A'
df_ref = df_test[df_test.dt.notnull()]
grouped_test = df_test.groupby('group')
grouped_ref = df_ref.groupby('group')
assert_frame_equal(grouped_ref.max(), grouped_test.max())
assert_frame_equal(grouped_ref.min(), grouped_test.min())
assert_frame_equal(grouped_ref.first(), grouped_test.first())
assert_frame_equal(grouped_ref.last(), grouped_test.last())
def test_groupby_preserves_sort(self):
        # Test to ensure that groupby always preserves the sort order of the
        # original object. Issues #8588 and #9651.
df = DataFrame(
{'int_groups': [3, 1, 0, 1, 0, 3, 3, 3],
'string_groups': ['z', 'a', 'z', 'a', 'a', 'g', 'g', 'g'],
'ints': [8, 7, 4, 5, 2, 9, 1, 1],
'floats': [2.3, 5.3, 6.2, -2.4, 2.2, 1.1, 1.1, 5],
'strings': ['z', 'd', 'a', 'e', 'word', 'word2', '42', '47']})
# Try sorting on different types and with different group types
for sort_column in ['ints', 'floats', 'strings', ['ints', 'floats'],
['ints', 'strings']]:
for group_column in ['int_groups', 'string_groups',
['int_groups', 'string_groups']]:
df = df.sort_values(by=sort_column)
g = df.groupby(group_column)
def test_sort(x):
assert_frame_equal(x, x.sort_values(by=sort_column))
g.apply(test_sort)
def test_nunique_with_object(self):
# GH 11077
data = pd.DataFrame(
[[100, 1, 'Alice'],
[200, 2, 'Bob'],
[300, 3, 'Charlie'],
[-400, 4, 'Dan'],
[500, 5, 'Edith']],
columns=['amount', 'id', 'name']
)
result = data.groupby(['id', 'amount'])['name'].nunique()
index = MultiIndex.from_arrays([data.id, data.amount])
expected = pd.Series([1] * 5, name='name', index=index)
tm.assert_series_equal(result, expected)
def test_transform_with_non_scalar_group(self):
# GH 10165
cols = pd.MultiIndex.from_tuples([
('syn', 'A'), ('mis', 'A'), ('non', 'A'),
('syn', 'C'), ('mis', 'C'), ('non', 'C'),
('syn', 'T'), ('mis', 'T'), ('non', 'T'),
('syn', 'G'), ('mis', 'G'), ('non', 'G')])
df = pd.DataFrame(np.random.randint(1, 10, (4, 12)),
columns=cols,
index=['A', 'C', 'G', 'T'])
self.assertRaisesRegexp(ValueError, 'transform must return a scalar '
                                'value for each group.*',
                                df.groupby(axis=1, level=1).transform,
lambda z: z.div(z.sum(axis=1), axis=0))
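# Module-level helpers used by the groupby tests.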
def assert_fp_equal(a, b):
assert (np.abs(a - b) < 1e-12).all()
def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
tups = lmap(tuple, df[keys].values)
tups = com._asarray_tuplesafe(tups)
expected = f(df.groupby(tups)[field])
for k, v in compat.iteritems(expected):
assert (result[k] == v)
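# Round-trip check: building a single group index from multiple label arrays
# and decomposing it again should recover the original labels.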
def test_decons():
from pandas.core.groupby import decons_group_index, get_group_index
def testit(label_list, shape):
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
label_list2 = decons_group_index(group_index, shape)
for a, b in zip(label_list, label_list2):
assert (np.array_equal(a, b))
shape = (4, 5, 6)
label_list = [np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100), np.tile(
[0, 2, 4, 3, 0, 1, 2, 3], 100), np.tile(
[5, 1, 0, 2, 3, 0, 5, 4], 100)]
testit(label_list, shape)
shape = (10000, 10000)
label_list = [np.tile(np.arange(10000), 5), np.tile(np.arange(10000), 5)]
testit(label_list, shape)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure', '-s'
], exit=False)
| mit |
rodluger/planetplanet | scripts/exomoon.py | 1 | 2109 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
exomoon.py |github|
-------------------
A silly example of a star-planet-moon system. The
orbital parameters are unrealistic, but this showcases
how the code can handle moons. Click on an event to
see it in detail.
.. plot::
:align: center
from scripts import exomoon
exomoon._test()
.. warning:: The ability to model moons is still in beta and hasn't been \
thoroughly tested. Please \
`let us know <https://github.com/rodluger/planetplanet/issues>`_ \
if you find any bugs!
.. role:: raw-html(raw)
:format: html
.. |github| replace:: :raw-html:`<a href = "https://github.com/rodluger/planetplanet/blob/master/scripts/exomoon.py"><i class="fa fa-github" aria-hidden="true"></i></a>`
'''
from __future__ import division, print_function, absolute_import, \
unicode_literals
import os, sys
sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from planetplanet import Planet, Star, Moon, System
from planetplanet.constants import *
import matplotlib.pyplot as pl
import numpy as np
def _test():
'''
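    Calls :py:func:`plot`; used when building the documentation gallery.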
'''
plot()
def plot():
'''
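    Compute and plot the light curve of a star-planet-moon system.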
'''
# Instantiate the star
star = Star('A', m = 0.1, r = 0.1, nz = 31, color = 'k',
limbdark = [0.4, 0.26])
# Planet b
b = Planet('b', m = 50., per = 1, inc = 90., r = 1., t0 = 0,
nz = 11, Omega = 0, w = 0., ecc = 0.,
phasecurve = True, color = 'r')
# Moon
m = Moon('bI', host = 'b', m = 0., per = 0.1, inc = 90., r = 0.5, t0 = 0.5,
nz = 11, Omega = 10, w = 0., ecc = 0., phasecurve = True,
color = 'b')
# Compute the light curve
system = System(star, b, m, nbody = True, integrator = 'ias15',
timestep = MINUTE)
time = np.arange(-1.1, 1.1, 0.1 * MINUTE)
system.compute(time, lambda2 = 100)
# Plot
system.plot_lightcurve(wavelength = 100, interactive = True)
if __name__ == '__main__':
plot() | gpl-3.0 |
alexsavio/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as those of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
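# with voting='soft' the ensemble averages the predicted class probabilities;
# weights=[1, 1, 5] makes the random forest count five times as much as each
# of the other two classifiers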
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'GaussianNB\nweight 1',
'RandomForestClassifier\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
mtagle/airflow | airflow/providers/amazon/aws/operators/hive_to_dynamodb.py | 3 | 4161 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains an operator to move data from Hive to DynamoDB.
"""
import json
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.aws_dynamodb import AwsDynamoDBHook
from airflow.providers.apache.hive.hooks.hive import HiveServer2Hook
from airflow.utils.decorators import apply_defaults
class HiveToDynamoDBTransferOperator(BaseOperator):
"""
    Moves data from Hive to DynamoDB. Note that for now the data is loaded
    into memory before being pushed to DynamoDB, so this operator should
    be used for smallish amounts of data.
:param sql: SQL query to execute against the hive database. (templated)
:type sql: str
:param table_name: target DynamoDB table
:type table_name: str
:param table_keys: partition key and sort key
:type table_keys: list
:param pre_process: implement pre-processing of source data
:type pre_process: function
:param pre_process_args: list of pre_process function arguments
:type pre_process_args: list
:param pre_process_kwargs: dict of pre_process function arguments
:type pre_process_kwargs: dict
:param region_name: aws region name (example: us-east-1)
:type region_name: str
:param schema: hive database schema
:type schema: str
:param hiveserver2_conn_id: source hive connection
:type hiveserver2_conn_id: str
:param aws_conn_id: aws connection
:type aws_conn_id: str
"""
template_fields = ('sql',)
template_ext = ('.sql',)
ui_color = '#a0e08c'
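    # Illustrative usage inside a DAG definition (the task id, SQL, table name
    # and keys below are placeholders, not part of this module):
    #
    #     backup_to_dynamodb = HiveToDynamoDBTransferOperator(
    #         task_id='hive_to_dynamodb',
    #         sql='SELECT id, name FROM source_hive_table',
    #         table_name='target_dynamodb_table',
    #         table_keys=['id'],
    #         dag=dag)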
@apply_defaults
def __init__( # pylint: disable=too-many-arguments
self,
sql,
table_name,
table_keys,
pre_process=None,
pre_process_args=None,
pre_process_kwargs=None,
region_name=None,
schema='default',
hiveserver2_conn_id='hiveserver2_default',
aws_conn_id='aws_default',
*args, **kwargs):
super().__init__(*args, **kwargs)
self.sql = sql
self.table_name = table_name
self.table_keys = table_keys
self.pre_process = pre_process
self.pre_process_args = pre_process_args
self.pre_process_kwargs = pre_process_kwargs
self.region_name = region_name
self.schema = schema
self.hiveserver2_conn_id = hiveserver2_conn_id
self.aws_conn_id = aws_conn_id
def execute(self, context):
hive = HiveServer2Hook(hiveserver2_conn_id=self.hiveserver2_conn_id)
self.log.info('Extracting data from Hive')
self.log.info(self.sql)
data = hive.get_pandas_df(self.sql, schema=self.schema)
dynamodb = AwsDynamoDBHook(aws_conn_id=self.aws_conn_id,
table_name=self.table_name,
table_keys=self.table_keys,
region_name=self.region_name)
self.log.info('Inserting rows into dynamodb')
if self.pre_process is None:
dynamodb.write_batch_data(
json.loads(data.to_json(orient='records')))
else:
dynamodb.write_batch_data(
self.pre_process(data=data,
args=self.pre_process_args,
kwargs=self.pre_process_kwargs))
self.log.info('Done.')
| apache-2.0 |
ouyangyike/Machine-Learning-and-Data-Mining | Nerual Network/util.py | 1 | 2372 | from __future__ import division
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
def LoadData(fname):
"""Loads data from an NPZ file.
Args:
fname: NPZ filename.
Returns:
data: Tuple {inputs, target}_{train, valid, test}.
              Row-major; the outer axis is the number of observations.
"""
npzfile = np.load(fname)
inputs_train = npzfile['inputs_train'].T / 255.0
inputs_valid = npzfile['inputs_valid'].T / 255.0
inputs_test = npzfile['inputs_test'].T / 255.0
target_train = npzfile['target_train'].tolist()
target_valid = npzfile['target_valid'].tolist()
target_test = npzfile['target_test'].tolist()
num_class = max(target_train + target_valid + target_test) + 1
target_train_1hot = np.zeros([num_class, len(target_train)])
target_valid_1hot = np.zeros([num_class, len(target_valid)])
target_test_1hot = np.zeros([num_class, len(target_test)])
for ii, xx in enumerate(target_train):
target_train_1hot[xx, ii] = 1.0
for ii, xx in enumerate(target_valid):
target_valid_1hot[xx, ii] = 1.0
for ii, xx in enumerate(target_test):
target_test_1hot[xx, ii] = 1.0
inputs_train = inputs_train.T
inputs_valid = inputs_valid.T
inputs_test = inputs_test.T
target_train_1hot = target_train_1hot.T
target_valid_1hot = target_valid_1hot.T
target_test_1hot = target_test_1hot.T
return inputs_train, inputs_valid, inputs_test, target_train_1hot, target_valid_1hot, target_test_1hot
def Save(fname, data):
"""Saves the model to a numpy file."""
print('Writing to ' + fname)
np.savez_compressed(fname, **data)
def Load(fname):
"""Loads model from numpy file."""
print('Loading from ' + fname)
return dict(np.load(fname))
def DisplayPlot(train, valid, ylabel, number=0):
"""Displays training curve.
Args:
train: Training statistics.
valid: Validation statistics.
ylabel: Y-axis label of the plot.
"""
plt.figure(number)
plt.clf()
train = np.array(train)
valid = np.array(valid)
plt.plot(train[:, 0], train[:, 1], 'b', label='Train')
plt.plot(valid[:, 0], valid[:, 1], 'g', label='Validation')
plt.xlabel('Epoch')
plt.ylabel(ylabel)
plt.legend()
plt.draw()
plt.pause(0.0001)
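# Example usage (the filename and statistics below are placeholders; the NPZ
# archive must contain the fields listed in LoadData):
#   inputs_train, inputs_valid, inputs_test, \
#       target_train, target_valid, target_test = LoadData('faces.npz')
#   DisplayPlot(train_ce_list, valid_ce_list, 'Cross Entropy', number=0)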
| mit |
moutai/scikit-learn | examples/ensemble/plot_forest_importances.py | 168 | 1793 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
| bsd-3-clause |
DevinZ1993/Pieces-of-Code | python/utils/batch_c2g.py | 4 | 2873 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
# script_path = os.path.realpath(__file__)
# runtime_path = os.getcwd()
# parent_path = os.path.dirname(path)
# name = os.path.basename(path)
# subpath = os.path.join(dir_path,rel_path)
# rel_path = os.path.relpath(path,dir_path)
# Information about a path:
# os.path.exists(path)
# os.path.isdir(path)
# os.path.isfile(path)
# os.path.getsize(path)
import re
import shutil
from scipy import misc
import numpy as np
import matplotlib.pyplot as plt
def batch_proc(IN_TOP,OUT_TOP,file_filter,get_dest,file_map):
print "Batch Processing: this may take some time ..."
# Generator created by os.walk:
for in_parent,dirs,files in os.walk(IN_TOP):
out_parent = os.path.join(OUT_TOP,os.path.relpath(in_parent,IN_TOP))
# Copy the directories:
for dir in dirs:
out_path = os.path.join(out_parent,dir)
if not(os.path.exists(out_path) and os.path.isdir(out_path)):
os.mkdir(out_path)
# Process the files:
for file in files:
in_path = os.path.join(in_parent,file)
out_path = get_dest(out_parent,file)
if file_filter(file) and os.path.getsize(in_path)>0 \
and not os.path.exists(out_path):
print "Processing ",in_path," ... "
try:
file_map(in_path,out_path)
print "\tDone."
except:
print "\tAborted?!"
print "All is well. :-)"
# Color-to-Grey:
def img_c2g(in_path,out_path):
image = misc.imread(in_path)
assert(3==len(image.shape) and 3<=image.shape[2])
(M,N) = image.shape[0:2]
new_img = np.zeros((M,N))
for i in range(M):
for j in range(N):
(r,g,b) = image[i][j][0:3]
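            # ITU-R BT.601 luma weights for the RGB -> grey conversion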
new_img[i][j] = .299*r+.587*g+.114*b
misc.imsave(out_path,new_img)
def batch_img_c2g(IN_TOP,OUT_TOP):
FILE_PATTERN = re.compile('\S*\.(jpg|png)',re.I)
batch_proc(
IN_TOP,
OUT_TOP,
lambda file: FILE_PATTERN.match(file),
lambda dir,file: os.path.join(dir,"grey_"+file),
img_c2g
)
# Add a pair of square brackets to each file:
def add_sqr_bkt(in_path,out_path):
fin = open(in_path,'r')
fout = open(out_path,'w')
fout.write('[')
line = fin.readline() # contains the line break
while len(line)>0:
fout.write(line)
line = fin.readline()
fout.write(line)
fout.write(']')
fout.close()
fin.close()
def batch_txt_proc(IN_TOP,OUT_TOP):
FILE_PATTERN = re.compile('\d+_data\.txt',re.I)
batch_proc(
IN_TOP,
OUT_TOP,
lambda file: FILE_PATTERN.match(file),
lambda dir,file: os.path.join(dir,file),
add_sqr_bkt
)
| mpl-2.0 |
rsivapr/scikit-learn | sklearn/datasets/lfw.py | 8 | 16527 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
import logging
import numpy as np
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b, u
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warn("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warn("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) / (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) / (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
face = np.asarray(imread(file_path)[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
            # average the color channels to compute a gray-level
            # representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with at least
    # `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
Parameters
----------
data_home: optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled: boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize: float, optional, default 0.5
Ratio used to resize the each face picture.
    min_faces_per_person: int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color: boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_: optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid use statistical
correlation from the background
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
    # wrap the loader in a memoizing function that will return memmapped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
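# Typical usage of fetch_lfw_people (the first call downloads ~200MB and
# caches it under data_home):
#   lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
#   X, y = lfw_people.data, lfw_people.target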
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
Parameters
----------
subset: optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home: optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit-learn data is stored in '~/scikit_learn_data'
subfolders.
funneled: boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize: float, optional, default 0.5
Ratio used to resize each face picture.
color: boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_: optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
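# Illustrative usage sketch (not part of the original module; assumes network
# access or a local copy of the LFW data under the scikit-learn data home):
#
#   from sklearn.datasets import fetch_lfw_people, fetch_lfw_pairs
#   people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
#   pairs = fetch_lfw_pairs(subset='train')
#   # people.images -> (n_samples, h, w); pairs.pairs -> (n_pairs, 2, h, w)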
| bsd-3-clause |
ChanChiChoi/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
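# A possible extension (not in the original example): compare in-sample fit
# quality using the estimators' R^2 scores.
#
#   for name, model in (('RBF', svr_rbf), ('Linear', svr_lin), ('Poly', svr_poly)):
#       print('%s kernel R^2: %.3f' % (name, model.score(X, y)))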
| bsd-3-clause |
ideaplat/Tback | machinelearning/decisiontreeregression/model.py | 1 | 5544 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import math
import numpy as np
import pandas as pd
from sklearn import preprocessing
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from data.dbaccess import normalize
from data.db import get_db_session, Pinkunhu2015
class DecisionTreeRegressionModel(object):
""" 使用线性回归预测下一年人均年收入 """
# features extracted for modeling
features = [
'tv', 'washing_machine', 'fridge',
'reason', 'is_danger_house', 'is_debt',
'arable_land', 'living_space', 'member_count',
'person_year_total_income', 'year_total_income',
'subsidy_total', 'wood_land',
'help_plan'
]
# prediction target
target = 'ny_person_income'
def run(self):
""" 运行 """
# fetch the training data
X, Y = self._fetch_data()
clf = self.get_classifier(X, Y)
# test
X, Y = self._fetch_test_data()
res = []
for item in range(21):
hit_ratio = self.predict(clf, X, Y, item * 0.02)
res.append([item * 0.02 * 100, hit_ratio * 100])
# plot the relationship between allowed deviation and hit ratio
arr = np.array(res)
plt.plot(arr[:, 0], arr[:, 1])  # draw the line
plt.plot(arr[:, 0], arr[:, 1], 'ro')  # draw the points
plt.xlabel('Deviation rate (%)')
plt.ylabel('Hit ratio (%)')
plt.title('Decision tree regression: predicting next year per-capita income')
plt.show()
def get_classifier(self, X, Y):
""" 构建线性回归模型
:param X: 训练数据
:param Y: 训练数据结果
:return: 模型
"""
clf = DecisionTreeRegressor()
clf.fit(X, Y)
return clf
def predict(self, clf, X, Y, deviation=0.1):
""" 用当前的模型预测
:param clf: 模型
:param X: 测试数据
:param Y: 测试数据结果
:param deviation: 允许误差率
:return: 命中率
"""
Y2 = clf.predict(X)
total, hit = len(Y), 0
for idx, v in enumerate(Y2):
if math.fabs(Y[idx] - v) <= math.fabs(Y[idx] * deviation):  # count as a hit if the error is within the allowed deviation
hit += 1
print 'Deviation: %d%%, Total: %d, Hit: %d, Precision: %.2f%%' % (100 * deviation, total, hit, 100.0*hit/total)
# results of using the model trained on county A ('A县') to predict county B ('B县')
# Deviation: 0%, Total: 40820, Hit: 0, Precision: 0.00%
# Deviation: 10%, Total: 40820, Hit: 24418, Precision: 59.82%
# Deviation: 20%, Total: 40820, Hit: 32935, Precision: 80.68%
# Deviation: 30%, Total: 40820, Hit: 36211, Precision: 88.71%
# Deviation: 40%, Total: 40820, Hit: 37367, Precision: 91.54%
# Deviation: 50%, Total: 40820, Hit: 38041, Precision: 93.19%
# Deviation: 60%, Total: 40820, Hit: 38502, Precision: 94.32%
# Deviation: 70%, Total: 40820, Hit: 38816, Precision: 95.09%
# Deviation: 80%, Total: 40820, Hit: 39071, Precision: 95.72%
# Deviation: 90%, Total: 40820, Hit: 39282, Precision: 96.23%
# Deviation: 100%, Total: 40820, Hit: 39432, Precision: 96.60%
return hit * 1.0 / total
def _fetch_data(self):
""" 获取建模数据 """
session = get_db_session()
objs = session.query(Pinkunhu2015).filter(
Pinkunhu2015.county == 'A县', Pinkunhu2015.ny_person_income != -1,
Pinkunhu2015.person_year_total_income > 100, Pinkunhu2015.person_year_total_income < 5000,
).all()
X, Y = [], []
for item in objs:
col_list = []
for col in self.features:
normalized_value = normalize(col, getattr(item, col))
col_list.append(normalized_value)
X.append(col_list)
normalized_value = normalize(self.target, getattr(item, self.target))
Y.append(normalized_value)
# # filter out data that may be erroneous
# after dividing per-capita annual income by 100, inspect the distribution and drop values occurring fewer than 5 times; this did not improve results, so it is disabled
# df = pd.DataFrame(X, columns=self.features)
# print '#df.shape:', df.shape
# df['person_year_total_income'] = df['person_year_total_income'] / 100
# df['person_year_total_income'] = df['person_year_total_income'].astype(int)
# df['person_year_total_income'] = df['person_year_total_income'] * 100
# df = df.groupby('person_year_total_income').filter(lambda x: len(x) > 5)
# print '#df.shape:', df.shape
# X, Y = df.loc[:, self.features[:-1]], df.loc[:, self.target]
return X, Y
def _fetch_test_data(self):
""" 获取测试数据 """
session = get_db_session()
objs = session.query(Pinkunhu2015).filter(
Pinkunhu2015.county == 'B县', Pinkunhu2015.ny_person_income != -1,
Pinkunhu2015.person_year_total_income > 100, Pinkunhu2015.person_year_total_income < 5000,
).all()
X, Y = [], []
for item in objs:
col_list = []
for col in self.features:
normalized_value = normalize(col, getattr(item, col))
col_list.append(normalized_value)
X.append(col_list)
normalized_value = normalize(self.target, getattr(item, self.target))
Y.append(normalized_value)
return X, Y
if __name__ == '__main__':
m = DecisionTreeRegressionModel()
m.run()
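# Illustrative check of the hit criterion used in predict() (toy numbers, not
# project data): a prediction is a hit when |y_true - y_pred| <= |y_true| * deviation.
#
#   import numpy as np
#   y_true = np.array([1000.0, 2000.0])
#   y_pred = np.array([1080.0, 2500.0])
#   hits = np.abs(y_true - y_pred) <= np.abs(y_true) * 0.1
#   print(hits.mean())  # 0.5 -> 50% hit ratio at 10% allowed deviation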
| mit |
ianctse/pvlib-python | pvlib/test/test_spa.py | 5 | 14836 | import os
import datetime as dt
import logging
pvl_logger = logging.getLogger('pvlib')
try:
from importlib import reload
except ImportError:
try:
from imp import reload
except ImportError:
pass
import numpy as np
import numpy.testing as npt
import pandas as pd
import unittest
from nose.tools import raises, assert_almost_equals
from nose.plugins.skip import SkipTest
from pvlib.location import Location
try:
from numba import __version__ as numba_version
numba_version_int = int(numba_version.split('.')[0] +
numba_version.split('.')[1])
except ImportError:
numba_version_int = 0
times = pd.date_range('2003-10-17 12:30:30', periods=1, freq='D').tz_localize('MST')
unixtimes = times.tz_convert('UTC').astype(np.int64)*1.0/10**9
lat = 39.742476
lon = -105.1786
elev = 1830.14
pressure = 820
temp = 11
delta_t = 67.0
atmos_refract= 0.5667
JD = 2452930.312847
JC = 0.0379277986858
JDE = 2452930.313623
JCE = 0.037927819916852
JME = 0.003792781991685
L = 24.0182616917
B = -0.0001011219
R = 0.9965422974
Theta = 204.0182616917
beta = 0.0001011219
X0 = 17185.861179
X1 = 1722.893218
X2 = 18234.075703
X3 = 18420.071012
X4 = 51.686951
dPsi = -0.00399840
dEpsilon = 0.00166657
epsilon0 = 84379.672625
epsilon = 23.440465
dTau = -0.005711
lamd = 204.0085519281
v0 = 318.515579
v = 318.511910
alpha = 202.227408
delta = -9.31434
H = 11.10590
xi = 0.002451
dAlpha = -0.000369
alpha_prime = 202.22704
delta_prime = -9.316179
H_prime = 11.10627
e0 = 39.872046
de = 0.016332
e = 39.888378
theta = 50.11162
theta0 = 90 - e0
Gamma = 14.340241
Phi = 194.340241
class SpaBase(object):
"""Test functions common to numpy and numba spa"""
def test_julian_day_dt(self):
dt = times.tz_convert('UTC')[0]
year = dt.year
month = dt.month
day = dt.day
hour = dt.hour
minute = dt.minute
second = dt.second
microsecond = dt.microsecond
assert_almost_equals(JD,
self.spa.julian_day_dt(year, month, day, hour,
minute, second, microsecond), 6)
def test_julian_ephemeris_day(self):
assert_almost_equals(JDE, self.spa.julian_ephemeris_day(JD, delta_t), 5)
def test_julian_century(self):
assert_almost_equals(JC, self.spa.julian_century(JD), 6)
def test_julian_ephemeris_century(self):
assert_almost_equals(JCE, self.spa.julian_ephemeris_century(JDE), 10)
def test_julian_ephemeris_millenium(self):
assert_almost_equals(JME, self.spa.julian_ephemeris_millennium(JCE), 10)
def test_heliocentric_longitude(self):
assert_almost_equals(L, self.spa.heliocentric_longitude(JME), 6)
def test_heliocentric_latitude(self):
assert_almost_equals(B, self.spa.heliocentric_latitude(JME), 6)
def test_heliocentric_radius_vector(self):
assert_almost_equals(R, self.spa.heliocentric_radius_vector(JME), 6)
def test_geocentric_longitude(self):
assert_almost_equals(Theta, self.spa.geocentric_longitude(L), 6)
def test_geocentric_latitude(self):
assert_almost_equals(beta, self.spa.geocentric_latitude(B), 6)
def test_mean_elongation(self):
assert_almost_equals(X0, self.spa.mean_elongation(JCE), 5)
def test_mean_anomaly_sun(self):
assert_almost_equals(X1, self.spa.mean_anomaly_sun(JCE), 5)
def test_mean_anomaly_moon(self):
assert_almost_equals(X2, self.spa.mean_anomaly_moon(JCE), 5)
def test_moon_argument_latitude(self):
assert_almost_equals(X3, self.spa.moon_argument_latitude(JCE), 5)
def test_moon_ascending_longitude(self):
assert_almost_equals(X4, self.spa.moon_ascending_longitude(JCE), 6)
def test_longitude_nutation(self):
assert_almost_equals(dPsi, self.spa.longitude_nutation(JCE, X0, X1, X2,
X3, X4), 6)
def test_obliquity_nutation(self):
assert_almost_equals(dEpsilon, self.spa.obliquity_nutation(JCE, X0, X1,
X2, X3, X4),
6)
def test_mean_ecliptic_obliquity(self):
assert_almost_equals(epsilon0, self.spa.mean_ecliptic_obliquity(JME), 6)
def test_true_ecliptic_obliquity(self):
assert_almost_equals(epsilon, self.spa.true_ecliptic_obliquity(
epsilon0, dEpsilon), 6)
def test_aberration_correction(self):
assert_almost_equals(dTau, self.spa.aberration_correction(R), 6)
def test_apparent_sun_longitude(self):
assert_almost_equals(lamd, self.spa.apparent_sun_longitude(
Theta, dPsi, dTau), 6)
def test_mean_sidereal_time(self):
assert_almost_equals(v0, self.spa.mean_sidereal_time(JD, JC), 3)
def test_apparent_sidereal_time(self):
assert_almost_equals(v, self.spa.apparent_sidereal_time(
v0, dPsi, epsilon), 5)
def test_geocentric_sun_right_ascension(self):
assert_almost_equals(alpha, self.spa.geocentric_sun_right_ascension(
lamd, epsilon, beta), 6)
def test_geocentric_sun_declination(self):
assert_almost_equals(delta, self.spa.geocentric_sun_declination(
lamd, epsilon, beta), 6)
def test_local_hour_angle(self):
assert_almost_equals(H, self.spa.local_hour_angle(v, lon, alpha), 4)
def test_equatorial_horizontal_parallax(self):
assert_almost_equals(xi, self.spa.equatorial_horizontal_parallax(R), 6)
def test_parallax_sun_right_ascension(self):
u = self.spa.uterm(lat)
x = self.spa.xterm(u, lat, elev)
y = self.spa.yterm(u, lat, elev)
assert_almost_equals(dAlpha, self.spa.parallax_sun_right_ascension(
x, xi, H, delta), 4)
def test_topocentric_sun_right_ascension(self):
assert_almost_equals(alpha_prime,
self.spa.topocentric_sun_right_ascension(
alpha, dAlpha), 5)
def test_topocentric_sun_declination(self):
u = self.spa.uterm(lat)
x = self.spa.xterm(u, lat, elev)
y = self.spa.yterm(u, lat, elev)
assert_almost_equals(delta_prime, self.spa.topocentric_sun_declination(
delta, x, y, xi, dAlpha,H), 5)
def test_topocentric_local_hour_angle(self):
assert_almost_equals(H_prime, self.spa.topocentric_local_hour_angle(
H, dAlpha), 5)
def test_topocentric_elevation_angle_without_atmosphere(self):
assert_almost_equals(
e0, self.spa.topocentric_elevation_angle_without_atmosphere(
lat, delta_prime, H_prime), 6)
def test_atmospheric_refraction_correction(self):
assert_almost_equals(de, self.spa.atmospheric_refraction_correction(
pressure, temp, e0, atmos_refract), 6)
def test_topocentric_elevation_angle(self):
assert_almost_equals(e, self.spa.topocentric_elevation_angle(e0, de), 6)
def test_topocentric_zenith_angle(self):
assert_almost_equals(theta, self.spa.topocentric_zenith_angle(e), 5)
def test_topocentric_astronomers_azimuth(self):
assert_almost_equals(Gamma, self.spa.topocentric_astronomers_azimuth(
H_prime, delta_prime, lat), 5)
def test_topocentric_azimuth_angle(self):
assert_almost_equals(Phi, self.spa.topocentric_azimuth_angle(Gamma), 5)
def test_solar_position(self):
npt.assert_almost_equal(
np.array([[theta, theta0, e, e0, Phi]]).T, self.spa.solar_position(
unixtimes, lat, lon, elev, pressure, temp, delta_t,
atmos_refract)[:-1], 5)
npt.assert_almost_equal(
np.array([[v, alpha, delta]]).T, self.spa.solar_position(
unixtimes, lat, lon, elev, pressure, temp, delta_t,
atmos_refract, sst=True)[:3], 5)
def test_equation_of_time(self):
eot = 14.64
M = self.spa.sun_mean_longitude(JME)
assert_almost_equals(eot, self.spa.equation_of_time(
M, alpha, dPsi, epsilon), 2)
def test_transit_sunrise_sunset(self):
# tests at greenwich
times = pd.DatetimeIndex([dt.datetime(1996, 7, 5, 0),
dt.datetime(2004, 12, 4, 0)]
).tz_localize('UTC').astype(np.int64)*1.0/10**9
sunrise = pd.DatetimeIndex([dt.datetime(1996, 7, 5, 7, 8, 15),
dt.datetime(2004, 12, 4, 4, 38, 57)]
).tz_localize('UTC').astype(np.int64)*1.0/10**9
sunset = pd.DatetimeIndex([dt.datetime(1996, 7, 5, 17, 1, 4),
dt.datetime(2004, 12, 4, 19, 2, 2)]
).tz_localize('UTC').astype(np.int64)*1.0/10**9
result = self.spa.transit_sunrise_sunset(times, -35.0, 0.0, 64.0, 1)
npt.assert_almost_equal(sunrise/1e3, result[1]/1e3, 3)
npt.assert_almost_equal(sunset/1e3, result[2]/1e3, 3)
times = pd.DatetimeIndex([dt.datetime(1994, 1, 2),]
).tz_localize('UTC').astype(np.int64)*1.0/10**9
sunset = pd.DatetimeIndex([dt.datetime(1994, 1, 2, 16, 59, 55),]
).tz_localize('UTC').astype(np.int64)*1.0/10**9
sunrise = pd.DatetimeIndex([dt.datetime(1994, 1, 2, 7, 8, 12),]
).tz_localize('UTC').astype(np.int64)*1.0/10**9
result = self.spa.transit_sunrise_sunset(times, 35.0, 0.0, 64.0, 1)
npt.assert_almost_equal(sunrise/1e3, result[1]/1e3, 3)
npt.assert_almost_equal(sunset/1e3, result[2]/1e3, 3)
# tests from USNO
# Golden
times = pd.DatetimeIndex([dt.datetime(2015, 1, 2),
dt.datetime(2015, 4, 2),
dt.datetime(2015, 8, 2),
dt.datetime(2015, 12, 2),],
).tz_localize('UTC').astype(np.int64)*1.0/10**9
sunrise = pd.DatetimeIndex([dt.datetime(2015, 1, 2, 7, 19),
dt.datetime(2015, 4, 2, 5, 43),
dt.datetime(2015, 8, 2, 5, 1),
dt.datetime(2015, 12, 2, 7, 1),],
).tz_localize('MST').astype(np.int64)*1.0/10**9
sunset = pd.DatetimeIndex([dt.datetime(2015, 1, 2, 16, 49),
dt.datetime(2015, 4, 2, 18, 24),
dt.datetime(2015, 8, 2, 19, 10),
dt.datetime(2015, 12, 2, 16, 38),],
).tz_localize('MST').astype(np.int64)*1.0/10**9
result = self.spa.transit_sunrise_sunset(times, 39.0, -105.0, 64.0, 1)
npt.assert_almost_equal(sunrise/1e3, result[1]/1e3, 1)
npt.assert_almost_equal(sunset/1e3, result[2]/1e3, 1)
# Beijing
times = pd.DatetimeIndex([dt.datetime(2015, 1, 2),
dt.datetime(2015, 4, 2),
dt.datetime(2015, 8, 2),
dt.datetime(2015, 12, 2),],
).tz_localize('UTC').astype(np.int64)*1.0/10**9
sunrise = pd.DatetimeIndex([dt.datetime(2015, 1, 2, 7, 36),
dt.datetime(2015, 4, 2, 5, 58),
dt.datetime(2015, 8, 2, 5, 13),
dt.datetime(2015, 12, 2, 7, 17),],
).tz_localize('Asia/Shanghai'
).astype(np.int64)*1.0/10**9
sunset = pd.DatetimeIndex([dt.datetime(2015, 1, 2, 17, 0),
dt.datetime(2015, 4, 2, 18, 39),
dt.datetime(2015, 8, 2, 19, 28),
dt.datetime(2015, 12, 2, 16, 50),],
).tz_localize('Asia/Shanghai'
).astype(np.int64)*1.0/10**9
result = self.spa.transit_sunrise_sunset(times, 39.917, 116.383, 64.0,1)
npt.assert_almost_equal(sunrise/1e3, result[1]/1e3, 1)
npt.assert_almost_equal(sunset/1e3, result[2]/1e3, 1)
class NumpySpaTest(unittest.TestCase, SpaBase):
"""Import spa without compiling to numba then run tests"""
@classmethod
def setUpClass(self):
os.environ['PVLIB_USE_NUMBA'] = '0'
import pvlib.spa as spa
spa = reload(spa)
self.spa = spa
@classmethod
def tearDownClass(self):
del os.environ['PVLIB_USE_NUMBA']
def test_julian_day(self):
assert_almost_equals(JD, self.spa.julian_day(unixtimes)[0], 6)
@unittest.skipIf(numba_version_int < 17,
'Numba not installed or version not >= 0.17.0')
class NumbaSpaTest(unittest.TestCase, SpaBase):
"""Import spa, compiling to numba, and run tests"""
@classmethod
def setUpClass(self):
os.environ['PVLIB_USE_NUMBA'] = '1'
if numba_version_int >= 17:
import pvlib.spa as spa
spa = reload(spa)
self.spa = spa
@classmethod
def tearDownClass(self):
del os.environ['PVLIB_USE_NUMBA']
def test_julian_day(self):
assert_almost_equals(JD, self.spa.julian_day(unixtimes[0]), 6)
def test_solar_position_singlethreaded(self):
npt.assert_almost_equal(
np.array([[theta, theta0, e, e0, Phi]]).T, self.spa.solar_position(
unixtimes, lat, lon, elev, pressure, temp, delta_t,
atmos_refract, numthreads=1)[:-1], 5)
npt.assert_almost_equal(
np.array([[v, alpha, delta]]).T, self.spa.solar_position(
unixtimes, lat, lon, elev, pressure, temp, delta_t,
atmos_refract, numthreads=1, sst=True)[:3], 5)
def test_solar_position_multithreaded(self):
result = np.array([theta, theta0, e, e0, Phi])
nresult = np.array([result, result, result]).T
times = np.array([unixtimes[0], unixtimes[0], unixtimes[0]])
npt.assert_almost_equal(
nresult
, self.spa.solar_position(
times, lat, lon, elev, pressure, temp, delta_t,
atmos_refract, numthreads=8)[:-1], 5)
result = np.array([v, alpha, delta])
nresult = np.array([result, result, result]).T
npt.assert_almost_equal(
nresult
, self.spa.solar_position(
times, lat, lon, elev, pressure, temp, delta_t,
atmos_refract, numthreads=8, sst=True)[:3], 5)
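# Note (not part of the original file): these unittest/nose test classes can be
# run with, e.g., `nosetests -v test_spa.py` or `python -m unittest test_spa`;
# the numba-compiled variants are skipped automatically when numba is missing
# or older than 0.17.0, as declared in the skipIf decorator above.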
| bsd-3-clause |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/HardContact_NonLinHardShear/Shear_Zone_Length/SZ_h_1/Normal_Stress_Plot.py | 72 | 2800 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
import matplotlib;
import math;
from matplotlib.ticker import MaxNLocator
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Solution_Normal_Stress.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(normal_strain*100,normal_stress/1000,'-r',label='Analytical Solution', linewidth=4, markersize=20)
plt.xlabel(r"Interface Type #")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Normal_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(normal_strain*100,normal_stress/1000,'-k',label='Numerical Solution', linewidth=4, markersize=20)
plt.xlabel(r"Normal Strain [%]")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
#############################################################
# # # axes = plt.gca()
# # # axes.set_xlim([-7,7])
# # # axes.set_ylim([-1,1])
# outfigname = "Interface_Test_Normal_Stress.pdf";
# plt.axis([0, 5.5, 90, 101])
# legend = plt.legend()
# legend.get_frame().set_linewidth(0.0)
# legend.get_frame().set_facecolor('none')
plt.legend()
plt.savefig('Normal_Stress.pdf', bbox_inches='tight')
# plt.show()
| cc0-1.0 |
cdegroc/scikit-learn | sklearn/linear_model/__init__.py | 2 | 1329 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import Lars, LassoLars, lars_path, LARS, LassoLARS, \
LarsCV, LassoLarsCV, LassoLarsIC
from .coordinate_descent import Lasso, ElasticNet, LassoCV, ElasticNetCV, \
lasso_path, enet_path
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV, \
ridge_regression
from .logistic import LogisticRegression
from .omp import orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit
from .perceptron import Perceptron
from .randomized_l1 import RandomizedLasso, RandomizedLogisticRegression, \
lasso_stability_path
from . import sparse
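# Illustrative usage sketch (not part of the original module; `X_train`,
# `y_train`, and `X_test` are placeholder arrays):
#
#   from sklearn.linear_model import Ridge
#   model = Ridge(alpha=1.0).fit(X_train, y_train)
#   predictions = model.predict(X_test)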
| bsd-3-clause |
pascalgutjahr/Praktikum-1 | GeometOptik/abbeB.py | 1 | 1504 | import matplotlib as mpl
from scipy.optimize import curve_fit
mpl.use('pgf')
import matplotlib.pyplot as plt
plt.rcParams['lines.linewidth'] = 1
import numpy as np
mpl.rcParams.update({
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
'pgf.texsystem': 'lualatex',
'pgf.preamble': r'\usepackage{unicode-math}\usepackage{siunitx}'
})
# V1 = 1 + 1/V
# V2 = 1 + V
# g = np.array([ 17.6, 16.0, 18.4, 19.5, 21.2, 25.1, 17.3, 17.3, 18.7, 21.0 ])
# V1 = np.array([ 4.76, 4.6, 4.84, 4.95, 5.12, 5.51, 4.73, 4.73, 4.87, 5.1 ])
# b = np.array([ 87.4, 84.0, 76.6, 70.5, 63.8, 54.9, 92.7, 85.2, 73.8, 66.5])*10**(-2)
b = np.array([66.5, 73.8, 85.2, 92.7, 70.5, 76.6, 84.0, 87.4])*10**(-2)
# V2 = np.array([ 11.74, 11.4, 10.66, 10.05, 9.38, 8.49, 12.27, 11.52, 10.38, 9.65])
# V1 = np.array([ 4.97, 5.25, 4.16, 3.62, 3.01, 2.19, 5.36, 4.92, 3.95, 3.17])
V2 = np.array([ 0.52, 0.57, 0.66, 0.78, 0.47, 0.56, 0.69, 0.82])
# V2 = np.array([0.82, 0.69, 0.56, 0.47, 1.16, 0.89, 0.78, 0.66, 0.57, 0.52])
def f(V2, m, n):
return m * (1+V2) + n
params, covariance = curve_fit(f, V2, b)
errors = np.sqrt(np.diag(covariance))
print('m =', params[0], '+-', errors[0])
print('n =', params[1], '+-', errors[1])
plt.plot(1+V2, f(V2, *params), 'k-', label='linear fit: b')
plt.plot(1+V2, b, '.', color='red')
plt.xlabel(r'$1 + V$')
plt.ylabel(r'$b´\,/\,\si{\meter}$')
plt.tight_layout()
plt.legend()
plt.grid()
plt.savefig('bilder/abbeB.pdf')
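# Interpretation note (standard Abbe-method relation, stated here as an
# assumption about this experiment): the fitted model is b' = f'(1 + V) + h',
# so params[0] estimates the focal length f' and params[1] the offset h' of
# the image-side principal plane relative to the chosen reference edge.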
| mit |
rahlk/CSC579__Computer_Performance_Modeling | simulation/proj2/tasks/task1.py | 1 | 1098 | """
Task 1
Plot average customer waiting time vs. rho (0.05, 0.15, ... , 0.95).
"""
from __future__ import print_function
from __future__ import division
import os, sys
root = os.path.join(os.getcwd().split("proj2")[0], "proj2")
if not root in sys.path:
sys.path.append(root)
import numpy as np
import pandas as pd
from pdb import set_trace
from Utils.SimUtil import Simulation
from Utils.MscUtil import Params
if __name__ == "__main__":
for decip in ["SJF", "LCFS", "FCFS"]:
print(decip)
for r in np.arange(0.05,1,0.1):
waits = []
for _ in xrange(10):
sim = Simulation(params = Params(K=40, dicipline=decip, C=1e5, rho = r, lmbd=1))
sim = sim.run_simulation()
waits.append(np.mean([cust.get_wait_time() for cust in sim.customers]))
print("{}\t{}\t{}\t{}".format(r, np.mean(waits),
np.mean(waits)-1.6*np.std(waits),
np.mean(waits)+1.6*np.std(waits)))
print("\n------------------\n")
set_trace()
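# The module docstring asks for a plot of mean waiting time vs. rho; a minimal
# sketch of how the printed values could be plotted (variable names below are
# illustrative, not from the project code):
#
#   import matplotlib.pyplot as plt
#   rhos = list(np.arange(0.05, 1, 0.1))
#   mean_waits = [...]  # collect np.mean(waits) for each rho above
#   plt.plot(rhos, mean_waits, 'o-')
#   plt.xlabel('rho')
#   plt.ylabel('mean waiting time')
#   plt.show()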
| mit |
waqasbhatti/astrobase | astrobase/lcproc/catalogs.py | 2 | 57723 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# catalogs.py - Waqas Bhatti ([email protected]) - Feb 2019
'''
This contains functions to generate light curve catalogs from collections of
light curves.
'''
#############
## LOGGING ##
#############
import logging
from astrobase import log_sub, log_fmt, log_date_fmt
DEBUG = False
if DEBUG:
level = logging.DEBUG
else:
level = logging.INFO
LOGGER = logging.getLogger(__name__)
logging.basicConfig(
level=level,
style=log_sub,
format=log_fmt,
datefmt=log_date_fmt,
)
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
import pickle
import os
import os.path
import glob
import shutil
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor
import numpy as np
import numpy.random as npr
npr.seed(0xc0ffee)
import scipy.spatial as sps
import astropy.io.fits as pyfits
from astropy.wcs import WCS
from astropy.visualization import ZScaleInterval, LinearStretch
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
try:
from tqdm import tqdm
TQDM = True
except Exception:
TQDM = False
pass
# to turn a list of keys into a dict address
# from https://stackoverflow.com/a/14692747
from functools import reduce
from operator import getitem
def _dict_get(datadict, keylist):
return reduce(getitem, keylist, datadict)
############
## CONFIG ##
############
NCPUS = mp.cpu_count()
# these translate filter operators given as strings to Python operators
FILTEROPS = {'eq':'==',
'gt':'>',
'ge':'>=',
'lt':'<',
'le':'<=',
'ne':'!='}
###################
## LOCAL IMPORTS ##
###################
from astrobase.plotbase import fits_finder_chart
from astrobase.cpserver.checkplotlist import checkplot_infokey_worker
from astrobase.lcproc import get_lcformat
#####################################################
## FUNCTIONS TO GENERATE OBJECT CATALOGS (LCLISTS) ##
#####################################################
def _lclist_parallel_worker(task):
'''This is a parallel worker for makelclist.
Parameters
----------
task : tuple
This is a tuple containing the following items:
task[0] = lcf
task[1] = columns
task[2] = lcformat
task[3] = lcformatdir
task[4] = lcndetkey
Returns
-------
dict or None
This contains all of the info for the object processed in this LC read
operation. If this fails, returns None
'''
lcf, columns, lcformat, lcformatdir, lcndetkey = task
# get the bits needed for lcformat handling
# NOTE: we re-import things in this worker function because sometimes
# functions can't be pickled correctly for passing them to worker functions
# in a processing pool
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception:
LOGEXCEPTION("can't figure out the light curve format")
return None
# we store the full path of the light curve
lcobjdict = {'lcfname':os.path.abspath(lcf)}
try:
# read the light curve in
lcdict = readerfunc(lcf)
# this should handle lists/tuples being returned by readerfunc
# we assume that the first element is the actual lcdict
# FIXME: figure out how to not need this assumption
if ( (isinstance(lcdict, (list, tuple))) and
(isinstance(lcdict[0], dict)) ):
lcdict = lcdict[0]
# insert all of the columns
for colkey in columns:
if '.' in colkey:
getkey = colkey.split('.')
else:
getkey = [colkey]
try:
thiscolval = _dict_get(lcdict, getkey)
except Exception:
LOGWARNING('column %s does not exist for %s' %
(colkey, lcf))
thiscolval = np.nan
# update the lcobjdict with this value
lcobjdict[getkey[-1]] = thiscolval
except Exception:
LOGEXCEPTION('could not figure out columns for %s' % lcf)
# insert all of the columns as nans
for colkey in columns:
if '.' in colkey:
getkey = colkey.split('.')
else:
getkey = [colkey]
thiscolval = np.nan
# update the lclistdict with this value
lcobjdict[getkey[-1]] = thiscolval
# now get the actual ndets; this excludes nans and infs
for dk in lcndetkey:
try:
if '.' in dk:
getdk = dk.split('.')
else:
getdk = [dk]
ndetcol = _dict_get(lcdict, getdk)
actualndets = ndetcol[np.isfinite(ndetcol)].size
lcobjdict['%s.ndet' % getdk[-1]] = actualndets
except Exception:
lcobjdict['%s.ndet' % getdk[-1]] = np.nan
return lcobjdict
def make_lclist(basedir,
outfile,
use_list_of_filenames=None,
lcformat='hat-sql',
lcformatdir=None,
fileglob=None,
recursive=True,
columns=('objectid',
'objectinfo.ra',
'objectinfo.decl',
'objectinfo.ndet'),
makecoordindex=('objectinfo.ra','objectinfo.decl'),
field_fitsfile=None,
field_wcsfrom=None,
field_scale=ZScaleInterval(),
field_stretch=LinearStretch(),
field_colormap=plt.cm.gray_r,
field_findersize=None,
field_pltopts={'marker':'o',
'markersize':10.0,
'markerfacecolor':'none',
'markeredgewidth':2.0,
'markeredgecolor':'red'},
field_grid=False,
field_gridcolor='k',
field_zoomcontain=True,
maxlcs=None,
nworkers=NCPUS):
'''This generates a light curve catalog for all light curves in a directory.
Given a base directory where all the files are, and a light curve format,
this will find all light curves, pull out the keys in each lcdict requested
in the `columns` kwarg for each object, and write them to the requested
output pickle file. These keys should be pointers to scalar values
(i.e. something like `objectinfo.ra` is OK, but something like 'times' won't
work because it's a vector).
Generally, this works with light curve reading functions that produce
lcdicts as detailed in the docstring for `lcproc.register_lcformat`. Once
you've registered your light curve reader functions using the
`lcproc.register_lcformat` function, pass in the `formatkey` associated with
your light curve format, and this function will be able to read all light
curves in that format as well as the object information stored in their
`objectinfo` dict.
Parameters
----------
basedir : str or list of str
If this is a str, points to a single directory to search for light
curves. If this is a list of str, it must be a list of directories to
search for light curves. All of these will be searched to find light
curve files matching either your light curve format's default fileglob
(when you registered your LC format), or a specific fileglob that you
can pass in using the `fileglob` kwargh here. If the `recursive` kwarg
is set, the provided directories will be searched recursively.
If `use_list_of_filenames` is not None, it will override this argument
and the function will take those light curves as the list of files it
must process instead of whatever is specified in `basedir`.
outfile : str
This is the name of the output file to write. This will be a pickle
file, so a good convention to use for this name is something like
'my-lightcurve-catalog.pkl'.
use_list_of_filenames : list of str or None
Use this kwarg to override whatever is provided in `basedir` and
directly pass in a list of light curve files to process. This can speed
up this function by a lot because no searches on disk will be performed
to find light curve files matching `basedir` and `fileglob`.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory when you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
fileglob : str or None
If provided, is a string that is a valid UNIX filename glob. Used to
override the default fileglob for this LC format when searching for
light curve files in `basedir`.
recursive : bool
If True, the directories specified in `basedir` will be searched
recursively for all light curve files that match the default fileglob
for this LC format or a specific one provided in `fileglob`.
columns : list of str
This is a list of keys in the lcdict produced by your light curve reader
function that contain object information, which will be extracted and
put into the output light curve catalog. It's highly recommended that
your LC reader function produce a lcdict that contains at least the
default keys shown here.
The lcdict keys to extract are specified by using an address scheme:
- First level dict keys can be specified directly:
e.g., 'objectid' will extract lcdict['objectid']
- Keys at other levels can be specified by using a period to indicate
the level:
- e.g., 'objectinfo.ra' will extract lcdict['objectinfo']['ra']
- e.g., 'objectinfo.varinfo.features.stetsonj' will extract
lcdict['objectinfo']['varinfo']['features']['stetsonj']
makecoordindex : list of two str or None
This is used to specify which lcdict keys contain the right ascension
and declination coordinates for this object. If these are provided, the
output light curve catalog will have a kdtree built on all object
coordinates, which enables fast spatial searches and cross-matching to
external catalogs by `checkplot` and `lcproc` functions.
field_fitsfile : str or None
If this is not None, it should be the path to a FITS image containing
the objects these light curves are for. If this is provided,
`make_lclist` will use the WCS information in the FITS itself if
`field_wcsfrom` is None (or from a WCS header file pointed to by
`field_wcsfrom`) to obtain x and y pixel coordinates for all of the
objects in the field. A finder chart will also be made using
`astrobase.plotbase.fits_finder_chart` using the corresponding
`field_scale`, `_stretch`, `_colormap`, `_findersize`, `_pltopts`,
`_grid`, and `_gridcolors` kwargs for that function, reproduced here to
enable customization of the finder chart plot.
field_wcsfrom : str or None
If `wcsfrom` is None, the WCS to transform the RA/Dec to pixel x/y will
be taken from the FITS header of `fitsfile`. If this is not None, it
must be a FITS or similar file that contains a WCS header in its first
extension.
field_scale : astropy.visualization.Interval object
`scale` sets the normalization for the FITS pixel values. This is an
astropy.visualization Interval object.
See http://docs.astropy.org/en/stable/visualization/normalization.html
for details on `scale` and `stretch` objects.
field_stretch : astropy.visualization.Stretch object
`stretch` sets the stretch function for mapping FITS pixel values to
output pixel values. This is an astropy.visualization Stretch object.
See http://docs.astropy.org/en/stable/visualization/normalization.html
for details on `scale` and `stretch` objects.
field_colormap : matplotlib Colormap object
`colormap` is a matplotlib color map object to use for the output image.
field_findersize : None or tuple of two ints
If `findersize` is None, the output image size will be set by the NAXIS1
and NAXIS2 keywords in the input `fitsfile` FITS header. Otherwise,
`findersize` must be a tuple with the intended x and y size of the image
in inches (all output images will use a DPI = 100).
field_pltopts : dict
`field_pltopts` controls how the overlay points will be plotted. This
a dict with standard matplotlib marker, etc. kwargs as key-val pairs,
e.g. 'markersize', 'markerfacecolor', etc. The default options make red
outline circles at the location of each object in the overlay.
field_grid : bool
`grid` sets if a grid will be made on the output image.
field_gridcolor : str
`gridcolor` sets the color of the grid lines. This is a usual matplotlib
color spec string.
field_zoomcontain : bool
`field_zoomcontain` controls if the finder chart will be zoomed to
just contain the overlayed points. Everything outside the footprint of
these points will be discarded.
maxlcs : int or None
This sets how many light curves to process in the input LC list
generated by searching for LCs in `basedir` or in the list provided as
`use_list_of_filenames`.
nworkers : int
This sets the number of parallel workers to launch to collect
information from the light curves.
Returns
-------
str
Returns the path to the generated light curve catalog pickle file.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception:
LOGEXCEPTION("can't figure out the light curve format")
return None
if not fileglob:
fileglob = dfileglob
# this is to get the actual ndet
# set to the magnitudes column
lcndetkey = dmagcols
if isinstance(use_list_of_filenames, list):
matching = use_list_of_filenames
else:
# handle the case where basedir is a list of directories
if isinstance(basedir, list):
matching = []
for bdir in basedir:
# now find the files
LOGINFO('searching for %s light curves in %s ...' % (lcformat,
bdir))
if recursive is False:
matching.extend(glob.glob(os.path.join(bdir, fileglob)))
else:
matching.extend(glob.glob(os.path.join(bdir,
'**',
fileglob),
recursive=True))
# otherwise, handle the usual case of one basedir to search in
else:
# now find the files
LOGINFO('searching for %s light curves in %s ...' %
(lcformat, basedir))
if recursive is False:
matching = glob.glob(os.path.join(basedir, fileglob))
else:
matching = glob.glob(os.path.join(basedir,
'**',
fileglob),recursive=True)
#
# now that we have all the files, process them
#
if matching and len(matching) > 0:
LOGINFO('found %s light curves' % len(matching))
# cut down matching to maxlcs
if maxlcs:
matching = matching[:maxlcs]
# prepare the output dict
lclistdict = {
'basedir':basedir,
'lcformat':lcformat,
'fileglob':fileglob,
'recursive':recursive,
'columns':columns,
'makecoordindex':makecoordindex,
'nfiles':len(matching),
'objects': {
}
}
# columns that will always be present in the output lclistdict
derefcols = ['lcfname']
derefcols.extend(['%s.ndet' % x.split('.')[-1] for x in lcndetkey])
for dc in derefcols:
lclistdict['objects'][dc] = []
# fill in the rest of the lclist columns from the columns kwarg
for col in columns:
# dereference the column
thiscol = col.split('.')
thiscol = thiscol[-1]
lclistdict['objects'][thiscol] = []
derefcols.append(thiscol)
# start collecting info
LOGINFO('collecting light curve info...')
tasks = [(x, columns, lcformat, lcformatdir, lcndetkey)
for x in matching]
with ProcessPoolExecutor(max_workers=nworkers) as executor:
results = executor.map(_lclist_parallel_worker, tasks)
results = list(results)
# update the columns in the overall dict from the results of the
# parallel map
for result in results:
for xcol in derefcols:
lclistdict['objects'][xcol].append(result[xcol])
executor.shutdown()
# done with collecting info
# turn all of the lists in the lclistdict into arrays
for col in lclistdict['objects']:
lclistdict['objects'][col] = np.array(lclistdict['objects'][col])
# handle duplicate objectids with different light curves
uniques, counts = np.unique(lclistdict['objects']['objectid'],
return_counts=True)
duplicated_objectids = uniques[counts > 1]
if duplicated_objectids.size > 0:
# redo the objectid array so it has a bit larger dtype so the extra
# tag can fit into the field
dt = lclistdict['objects']['objectid'].dtype.str
dt = '<U%s' % (
int(dt.replace('<','').replace('U','').replace('S','')) + 3
)
lclistdict['objects']['objectid'] = np.array(
lclistdict['objects']['objectid'],
dtype=dt
)
for objid in duplicated_objectids:
objid_inds = np.where(
lclistdict['objects']['objectid'] == objid
)
# mark the duplicates, assume the first instance is the actual
# one
for ncounter, nind in enumerate(objid_inds[0][1:]):
lclistdict['objects']['objectid'][nind] = '%s-%s' % (
lclistdict['objects']['objectid'][nind],
ncounter+2
)
LOGWARNING(
'tagging duplicated instance %s of objectid: '
'%s as %s-%s, lightcurve: %s' %
(ncounter+2, objid, objid, ncounter+2,
lclistdict['objects']['lcfname'][nind])
)
# if we're supposed to make a spatial index, do so
if (makecoordindex and
isinstance(makecoordindex, (list, tuple)) and
len(makecoordindex) == 2):
try:
# deref the column names
racol, declcol = makecoordindex
racol = racol.split('.')[-1]
declcol = declcol.split('.')[-1]
# get the ras and decls
objra, objdecl = (lclistdict['objects'][racol],
lclistdict['objects'][declcol])
# get the xyz unit vectors from ra,decl
# since i had to remind myself:
# https://en.wikipedia.org/wiki/Equatorial_coordinate_system
cosdecl = np.cos(np.radians(objdecl))
sindecl = np.sin(np.radians(objdecl))
cosra = np.cos(np.radians(objra))
sinra = np.sin(np.radians(objra))
xyz = np.column_stack((cosra*cosdecl,sinra*cosdecl, sindecl))
# generate the kdtree
kdt = sps.cKDTree(xyz,copy_data=True)
# put the tree into the dict
lclistdict['kdtree'] = kdt
LOGINFO('kdtree generated for (ra, decl): (%s, %s)' %
(makecoordindex[0], makecoordindex[1]))
except Exception:
LOGEXCEPTION('could not make kdtree for (ra, decl): (%s, %s)' %
(makecoordindex[0], makecoordindex[1]))
raise
# generate the xy pairs if fieldfits is not None
if field_fitsfile and os.path.exists(field_fitsfile):
# read in the FITS file
if field_wcsfrom is None:
hdulist = pyfits.open(field_fitsfile)
hdr = hdulist[0].header
hdulist.close()
w = WCS(hdr)
wcsok = True
elif os.path.exists(field_wcsfrom):
w = WCS(field_wcsfrom)
wcsok = True
else:
LOGERROR('could not determine WCS info for input FITS: %s' %
field_fitsfile)
wcsok = False
if wcsok:
# first, transform the ra/decl to x/y and put these in the
# lclist output dict
radecl = np.column_stack((objra, objdecl))
lclistdict['objects']['framexy'] = w.all_world2pix(
radecl,
1
)
# next, we'll make a PNG plot for the finder
finder_outfile = os.path.join(
os.path.dirname(outfile),
os.path.splitext(os.path.basename(outfile))[0] + '.png'
)
finder_png = fits_finder_chart(
field_fitsfile,
finder_outfile,
wcsfrom=field_wcsfrom,
scale=field_scale,
stretch=field_stretch,
colormap=field_colormap,
findersize=field_findersize,
overlay_ra=objra,
overlay_decl=objdecl,
overlay_pltopts=field_pltopts,
overlay_zoomcontain=field_zoomcontain,
grid=field_grid,
gridcolor=field_gridcolor
)
if finder_png is not None:
LOGINFO('generated a finder PNG '
'with an object position overlay '
'for this LC list: %s' % finder_png)
# write the pickle
with open(outfile,'wb') as outfd:
pickle.dump(lclistdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)
LOGINFO('done. LC info -> %s' % outfile)
return outfile
else:
LOGERROR('no files found in %s matching %s' % (basedir, fileglob))
return None
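# Minimal usage sketch (not part of the original module; the directory, output
# path, and lcformat key are placeholders):
#
#   from astrobase.lcproc.catalogs import make_lclist
#   lc_catalog = make_lclist('/path/to/lightcurves', 'lc-catalog.pkl',
#                            lcformat='hat-sql', nworkers=4)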
def filter_lclist(lc_catalog,
objectidcol='objectid',
racol='ra',
declcol='decl',
xmatchexternal=None,
xmatchdistarcsec=3.0,
externalcolnums=(0,1,2),
externalcolnames=('objectid','ra','decl'),
externalcoldtypes='U20,f8,f8',
externalcolsep=None,
externalcommentchar='#',
conesearch=None,
conesearchworkers=1,
columnfilters=None,
field_fitsfile=None,
field_wcsfrom=None,
field_scale=ZScaleInterval(),
field_stretch=LinearStretch(),
field_colormap=plt.cm.gray_r,
field_findersize=None,
field_pltopts={'marker':'o',
'markersize':10.0,
'markerfacecolor':'none',
'markeredgewidth':2.0,
'markeredgecolor':'red'},
field_grid=False,
field_gridcolor='k',
field_zoomcontain=True,
copylcsto=None):
'''This is used to perform cone-search, cross-match, and column-filter
operations on a light curve catalog generated by `make_lclist`.
Uses the output of `make_lclist` above. This function returns a list of
light curves matching various criteria specified by the `xmatchexternal`,
`conesearch`, and `columnfilters` kwargs. Use this function to generate
input lists for other lcproc functions,
e.g. `lcproc.lcvfeatures.parallel_varfeatures`,
`lcproc.periodfinding.parallel_pf`, and `lcproc.lcbin.parallel_timebin`,
among others.
The operations are applied in this order if more than one is specified:
`xmatchexternal` -> `conesearch` -> `columnfilters`. All results from these
operations are joined using a logical AND operation.
Parameters
----------
lc_catalog : str
This is the path to the light curve catalog pickle file produced by
`make_lclist`.
objectidcol : str
This is the name of the object ID column in the light curve catalog.
racol : str
This is the name of the RA column in the light curve catalog.
declcol : str
This is the name of the Dec column in the light curve catalog.
xmatchexternal : str or None
If provided, this is the filename of a text file containing objectids,
ras, and decs; objects in the light curve catalog will be cross-matched
against these positions.
xmatchdistarcsec : float
This is the distance in arcseconds to use when cross-matching to the
external catalog in `xmatchexternal`.
externalcolnums : sequence of int
This a list of the zero-indexed column numbers of columns to extract
from the external catalog file.
externalcolnames : sequence of str
This is a list of names of columns that will be extracted from the
external catalog file. This is the same length as
`externalcolnums`. These must contain the names provided as the
`objectid`, `ra`, and `decl` column names so this function knows which
column numbers correspond to those columns and can use them to set up
the cross-match.
externalcoldtypes : str
This is a CSV string containing numpy dtype definitions for all columns
listed to extract from the external catalog file. The number of dtype
definitions should be equal to the number of columns to extract.
externalcolsep : str or None
The column separator to use when extracting columns from the external
catalog file. If None, any whitespace between columns is used as the
separator.
externalcommentchar : str
The character indicating that a line in the external catalog file is to
be ignored.
conesearch : list of float
This is used to specify cone-search parameters. It should be a three
element list:
[center_ra_deg, center_decl_deg, search_radius_deg]
conesearchworkers : int
The number of parallel workers to launch for the cone-search operation.
columnfilters : list of str
This is a list of strings indicating any filters to apply on each column
in the light curve catalog. All column filters are applied in the
specified sequence and are combined with a logical AND operator. The
format of each filter string should be:
'<lc_catalog column>|<operator>|<operand>'
where:
- <lc_catalog column> is a column in the lc_catalog pickle file
- <operator> is one of: 'lt', 'gt', 'le', 'ge', 'eq', 'ne', which
correspond to the usual operators: <, >, <=, >=, ==, != respectively.
- <operand> is a float, int, or string.
field_fitsfile : str or None
If this is not None, it should be the path to a FITS image containing
the objects these light curves are for. If this is provided,
`make_lclist` will use the WCS information in the FITS itself if
`field_wcsfrom` is None (or from a WCS header file pointed to by
`field_wcsfrom`) to obtain x and y pixel coordinates for all of the
objects in the field. A finder chart will also be made using
`astrobase.plotbase.fits_finder_chart` using the corresponding
`field_scale`, `_stretch`, `_colormap`, `_findersize`, `_pltopts`,
`_grid`, and `_gridcolors` kwargs for that function, reproduced here to
enable customization of the finder chart plot.
field_wcsfrom : str or None
If `wcsfrom` is None, the WCS to transform the RA/Dec to pixel x/y will
be taken from the FITS header of `fitsfile`. If this is not None, it
must be a FITS or similar file that contains a WCS header in its first
extension.
field_scale : astropy.visualization.Interval object
`scale` sets the normalization for the FITS pixel values. This is an
astropy.visualization Interval object.
See http://docs.astropy.org/en/stable/visualization/normalization.html
for details on `scale` and `stretch` objects.
field_stretch : astropy.visualization.Stretch object
`stretch` sets the stretch function for mapping FITS pixel values to
output pixel values. This is an astropy.visualization Stretch object.
See http://docs.astropy.org/en/stable/visualization/normalization.html
for details on `scale` and `stretch` objects.
field_colormap : matplotlib Colormap object
`colormap` is a matplotlib color map object to use for the output image.
field_findersize : None or tuple of two ints
If `findersize` is None, the output image size will be set by the NAXIS1
and NAXIS2 keywords in the input `fitsfile` FITS header. Otherwise,
`findersize` must be a tuple with the intended x and y size of the image
in inches (all output images will use a DPI = 100).
field_pltopts : dict
`field_pltopts` controls how the overlay points will be plotted. This
a dict with standard matplotlib marker, etc. kwargs as key-val pairs,
e.g. 'markersize', 'markerfacecolor', etc. The default options make red
outline circles at the location of each object in the overlay.
field_grid : bool
`grid` sets if a grid will be made on the output image.
field_gridcolor : str
`gridcolor` sets the color of the grid lines. This is a usual matplotlib
color spec string.
field_zoomcontain : bool
`field_zoomcontain` controls if the finder chart will be zoomed to
just contain the overlayed points. Everything outside the footprint of
these points will be discarded.
copylcsto : str
If this is provided, it is interpreted as a directory target to copy
all the light curves that match the specified conditions.
Returns
-------
tuple
Returns a two elem tuple: (matching_object_lcfiles, matching_objectids)
if conesearch and/or column filters are used. If `xmatchexternal` is
also used, a three-elem tuple is returned: (matching_object_lcfiles,
matching_objectids, extcat_matched_objectids).
'''
with open(lc_catalog,'rb') as infd:
lclist = pickle.load(infd)
# generate numpy arrays of the matching object indexes. we do it this way so
# we can AND everything at the end, instead of having to look up the objects
# at these indices and running the columnfilter on them
xmatch_matching_index = np.full_like(lclist['objects'][objectidcol],
False,
dtype=np.bool)
conesearch_matching_index = np.full_like(lclist['objects'][objectidcol],
False,
dtype=np.bool)
# do the xmatch first
ext_matches = []
ext_matching_objects = []
if (xmatchexternal and
isinstance(xmatchexternal, str) and
os.path.exists(xmatchexternal)):
try:
# read in the external file
extcat = np.genfromtxt(xmatchexternal,
usecols=externalcolnums,
delimiter=externalcolsep,
names=externalcolnames,
dtype=externalcoldtypes,
comments=externalcommentchar)
ext_cosdecl = np.cos(np.radians(extcat['decl']))
ext_sindecl = np.sin(np.radians(extcat['decl']))
ext_cosra = np.cos(np.radians(extcat['ra']))
ext_sinra = np.sin(np.radians(extcat['ra']))
ext_xyz = np.column_stack((ext_cosra*ext_cosdecl,
ext_sinra*ext_cosdecl,
ext_sindecl))
ext_xyzdist = 2.0 * np.sin(np.radians(xmatchdistarcsec/3600.0)/2.0)
# get our kdtree
our_kdt = lclist['kdtree']
# get the external kdtree
ext_kdt = sps.cKDTree(ext_xyz)
# do a query_ball_tree
extkd_matchinds = ext_kdt.query_ball_tree(our_kdt, ext_xyzdist)
for extind, mind in enumerate(extkd_matchinds):
if len(mind) > 0:
ext_matches.append(mind[0])
# get the whole matching row for the ext objects recarray
ext_matching_objects.append(extcat[extind])
ext_matches = np.array(ext_matches)
if ext_matches.size > 0:
# update the xmatch_matching_index
xmatch_matching_index[ext_matches] = True
LOGINFO('xmatch: objects matched to %s within %.1f arcsec: %s' %
(xmatchexternal, xmatchdistarcsec, ext_matches.size))
else:
LOGERROR("xmatch: no objects were cross-matched to external "
"catalog spec: %s, can't continue" % xmatchexternal)
return None, None, None
except Exception:
LOGEXCEPTION('could not match to external catalog spec: %s' %
repr(xmatchexternal))
raise
# do the cone search next
if (conesearch and
isinstance(conesearch, (list, tuple)) and
len(conesearch) == 3):
try:
racenter, declcenter, searchradius = conesearch
cosdecl = np.cos(np.radians(declcenter))
sindecl = np.sin(np.radians(declcenter))
cosra = np.cos(np.radians(racenter))
sinra = np.sin(np.radians(racenter))
# this is the search distance in xyz unit vectors
xyzdist = 2.0 * np.sin(np.radians(searchradius)/2.0)
# get the kdtree
our_kdt = lclist['kdtree']
# look up the coordinates
kdtindices = our_kdt.query_ball_point([cosra*cosdecl,
sinra*cosdecl,
sindecl],
xyzdist,
n_jobs=conesearchworkers)
if kdtindices and len(kdtindices) > 0:
LOGINFO('cone search: objects within %.4f deg '
'of (%.3f, %.3f): %s' %
(searchradius, racenter, declcenter, len(kdtindices)))
# update the conesearch_matching_index
matchingind = kdtindices
conesearch_matching_index[np.array(matchingind)] = True
# we fail immediately if we found nothing. this assumes the user
# cares more about the cone-search than the regular column filters
else:
LOGERROR("cone-search: no objects were found within "
"%.4f deg of (%.3f, %.3f): %s, can't continue" %
(searchradius, racenter, declcenter, len(kdtindices)))
return None, None
except Exception:
LOGEXCEPTION('cone-search: could not run a cone-search, '
'is there a kdtree present in %s?' % lc_catalog)
raise
# now that we're done with cone-search, do the column filtering
allfilterinds = []
if columnfilters and isinstance(columnfilters, list):
# go through each filter
for cfilt in columnfilters:
try:
fcol, foperator, foperand = cfilt.split('|')
foperator = FILTEROPS[foperator]
# generate the evalstring
filterstr = (
"np.isfinite(lclist['objects']['%s']) & "
"(lclist['objects']['%s'] %s %s)"
) % (fcol, fcol, foperator, foperand)
filterind = eval(filterstr)
ngood = lclist['objects'][objectidcol][filterind].size
LOGINFO('filter: %s -> objects matching: %s ' % (cfilt, ngood))
allfilterinds.append(filterind)
except Exception:
LOGEXCEPTION('filter: could not understand filter spec: %s'
% cfilt)
LOGWARNING('filter: not applying this broken filter')
# now that we have all the filter indices good to go
# logical-AND all the things
# make sure we only do filtering if we were told to do so
if (xmatchexternal or conesearch or columnfilters):
filterstack = []
if xmatchexternal:
filterstack.append(xmatch_matching_index)
if conesearch:
filterstack.append(conesearch_matching_index)
if columnfilters:
filterstack.extend(allfilterinds)
finalfilterind = np.column_stack(filterstack)
finalfilterind = np.all(finalfilterind, axis=1)
# get the filtered object light curves and object names
filteredobjectids = lclist['objects'][objectidcol][finalfilterind]
filteredlcfnames = lclist['objects']['lcfname'][finalfilterind]
else:
filteredobjectids = lclist['objects'][objectidcol]
filteredlcfnames = lclist['objects']['lcfname']
# if we're told to make a finder chart with the selected objects
if field_fitsfile is not None and os.path.exists(field_fitsfile):
# get the RA and DEC of the matching objects
matching_ra = lclist['objects'][racol][finalfilterind]
matching_decl = lclist['objects'][declcol][finalfilterind]
matching_postfix = []
if xmatchexternal is not None:
matching_postfix.append(
'xmatch_%s' %
os.path.splitext(os.path.basename(xmatchexternal))[0]
)
if conesearch is not None:
matching_postfix.append('conesearch_RA%.3f_DEC%.3f_RAD%.5f' %
tuple(conesearch))
if columnfilters is not None:
for cfi, cf in enumerate(columnfilters):
if cfi == 0:
matching_postfix.append('filter_%s_%s_%s' %
tuple(cf.split('|')))
else:
matching_postfix.append('_and_%s_%s_%s' %
tuple(cf.split('|')))
if len(matching_postfix) > 0:
matching_postfix = '-%s' % '_'.join(matching_postfix)
else:
matching_postfix = ''
# next, we'll make a PNG plot for the finder
finder_outfile = os.path.join(
os.path.dirname(lc_catalog),
'%s%s.png' %
(os.path.splitext(os.path.basename(lc_catalog))[0],
matching_postfix)
)
finder_png = fits_finder_chart(
field_fitsfile,
finder_outfile,
wcsfrom=field_wcsfrom,
scale=field_scale,
stretch=field_stretch,
colormap=field_colormap,
findersize=field_findersize,
overlay_ra=matching_ra,
overlay_decl=matching_decl,
overlay_pltopts=field_pltopts,
field_zoomcontain=field_zoomcontain,
grid=field_grid,
gridcolor=field_gridcolor
)
if finder_png is not None:
LOGINFO('generated a finder PNG '
'with an object position overlay '
'for this filtered LC list: %s' % finder_png)
# if copylcsto is not None, copy LCs over to it
if copylcsto is not None:
if not os.path.exists(copylcsto):
os.mkdir(copylcsto)
if TQDM:
lciter = tqdm(filteredlcfnames)
else:
lciter = filteredlcfnames
LOGINFO('copying matching light curves to %s' % copylcsto)
for lc in lciter:
shutil.copy(lc, copylcsto)
LOGINFO('done. objects matching all filters: %s' % filteredobjectids.size)
if xmatchexternal and len(ext_matching_objects) > 0:
return filteredlcfnames, filteredobjectids, ext_matching_objects
else:
return filteredlcfnames, filteredobjectids
############################################################
## ADDING CHECKPLOT INFO BACK TO THE LIGHT CURVE CATALOGS ##
############################################################
def _cpinfo_key_worker(task):
'''This wraps `checkplotlist.checkplot_infokey_worker`.
This is used to get the correct dtype for each element in retrieved results.
Parameters
----------
task : tuple
task[0] = cpfile
task[1] = keyspeclist (infokeys kwarg from `add_cpinfo_to_lclist`)
Returns
-------
dict
All of the requested keys from the checkplot are returned along with
their values in a dict.
'''
cpfile, keyspeclist = task
keystoget = [x[0] for x in keyspeclist]
nonesubs = [x[-2] for x in keyspeclist]
nansubs = [x[-1] for x in keyspeclist]
# reform the keystoget into a list of lists
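    # e.g. 'objectinfo.gaia_ids.0' -> ['objectinfo', 'gaia_ids', 0]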
for i, k in enumerate(keystoget):
thisk = k.split('.')
thisk = [(int(x) if x.isdecimal() else x) for x in thisk]
keystoget[i] = thisk
# add in the objectid as well to match to the object catalog later
keystoget.insert(0,['objectid'])
nonesubs.insert(0, '')
nansubs.insert(0,'')
# get all the keys we need
vals = checkplot_infokey_worker((cpfile, keystoget))
# if they have some Nones, nans, etc., reform them as expected
for val, nonesub, nansub, valind in zip(vals, nonesubs,
nansubs, range(len(vals))):
if val is None:
outval = nonesub
elif isinstance(val, float) and not np.isfinite(val):
outval = nansub
elif isinstance(val, (list, tuple)):
outval = ', '.join(val)
else:
outval = val
vals[valind] = outval
return vals
CPINFO_DEFAULTKEYS = [
# key, dtype, first level, overwrite=T|append=F, None sub, nan sub
('comments',
np.unicode_, False, True, '', ''),
('objectinfo.objecttags',
np.unicode_, True, True, '', ''),
('objectinfo.twomassid',
np.unicode_, True, True, '', ''),
('objectinfo.bmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.vmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.rmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.imag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.jmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.hmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.kmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.sdssu',
np.float_, True, True, np.nan, np.nan),
('objectinfo.sdssg',
np.float_, True, True, np.nan, np.nan),
('objectinfo.sdssr',
np.float_, True, True, np.nan, np.nan),
('objectinfo.sdssi',
np.float_, True, True, np.nan, np.nan),
('objectinfo.sdssz',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_bmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_vmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_rmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_imag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_jmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_hmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_kmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_sdssu',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_sdssg',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_sdssr',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_sdssi',
np.float_, True, True, np.nan, np.nan),
('objectinfo.dered_sdssz',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_bmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_vmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_rmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_imag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_jmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_hmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_kmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_sdssu',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_sdssg',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_sdssr',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_sdssi',
np.float_, True, True, np.nan, np.nan),
('objectinfo.extinction_sdssz',
np.float_, True, True, np.nan, np.nan),
('objectinfo.color_classes',
np.unicode_, True, True, '', ''),
('objectinfo.pmra',
np.float_, True, True, np.nan, np.nan),
('objectinfo.pmdecl',
np.float_, True, True, np.nan, np.nan),
('objectinfo.propermotion',
np.float_, True, True, np.nan, np.nan),
('objectinfo.rpmj',
np.float_, True, True, np.nan, np.nan),
('objectinfo.gl',
np.float_, True, True, np.nan, np.nan),
('objectinfo.gb',
np.float_, True, True, np.nan, np.nan),
('objectinfo.gaia_status',
np.unicode_, True, True, '', ''),
('objectinfo.gaia_ids.0',
np.unicode_, True, True, '', ''),
('objectinfo.gaiamag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.gaia_parallax',
np.float_, True, True, np.nan, np.nan),
('objectinfo.gaia_parallax_err',
np.float_, True, True, np.nan, np.nan),
('objectinfo.gaia_absmag',
np.float_, True, True, np.nan, np.nan),
('objectinfo.simbad_best_mainid',
np.unicode_, True, True, '', ''),
('objectinfo.simbad_best_objtype',
np.unicode_, True, True, '', ''),
('objectinfo.simbad_best_allids',
np.unicode_, True, True, '', ''),
('objectinfo.simbad_best_distarcsec',
np.float_, True, True, np.nan, np.nan),
#
# TIC info
#
('objectinfo.ticid',
np.unicode_, True, True, '', ''),
('objectinfo.tic_version',
np.unicode_, True, True, '', ''),
('objectinfo.tessmag',
np.float_, True, True, np.nan, np.nan),
#
# variability info
#
('varinfo.vartags',
np.unicode_, False, True, '', ''),
('varinfo.varperiod',
np.float_, False, True, np.nan, np.nan),
('varinfo.varepoch',
np.float_, False, True, np.nan, np.nan),
('varinfo.varisperiodic',
np.int_, False, True, 0, 0),
('varinfo.objectisvar',
np.int_, False, True, 0, 0),
('varinfo.features.median',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.mad',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.stdev',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.mag_iqr',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.skew',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.kurtosis',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.stetsonj',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.stetsonk',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.eta_normal',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.linear_fit_slope',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.magnitude_ratio',
np.float_, False, True, np.nan, np.nan),
('varinfo.features.beyond1std',
np.float_, False, True, np.nan, np.nan)
]
def add_cpinfo_to_lclist(
checkplots, # list or a directory path
initial_lc_catalog,
magcol, # to indicate checkplot magcol
outfile,
checkplotglob='checkplot*.pkl*',
infokeys=CPINFO_DEFAULTKEYS,
nworkers=NCPUS
):
'''This adds checkplot info to the initial light curve catalogs generated by
`make_lclist`.
This is used to incorporate all the extra info checkplots can have for
objects back into columns in the light curve catalog produced by
`make_lclist`. Objects are matched between the checkplots and the light
curve catalog using their `objectid`. This then allows one to search this
'augmented' light curve catalog by these extra columns. The 'augmented'
    light curve catalog also forms the basis for the search interface provided by
the LCC-Server.
The default list of keys that will be extracted from a checkplot and added
as columns in the initial light curve catalog is listed above in the
`CPINFO_DEFAULTKEYS` list.
Parameters
----------
checkplots : str or list
        If this is a str, it is interpreted as a directory which will be searched
for checkplot pickle files using `checkplotglob`. If this is a list, it
will be interpreted as a list of checkplot pickle files to process.
initial_lc_catalog : str
This is the path to the light curve catalog pickle made by
`make_lclist`.
magcol : str
This is used to indicate the light curve magnitude column to extract
magnitude column specific information. For example, Stetson variability
indices can be generated using magnitude measurements in separate
photometric apertures, which appear in separate `magcols` in the
checkplot. To associate each such feature of the object with its
specific `magcol`, pass that `magcol` in here. This `magcol` will then
be added as a prefix to the resulting column in the 'augmented' LC
catalog, e.g. Stetson J will appear as `magcol1_stetsonj` and
`magcol2_stetsonj` for two separate magcols.
outfile : str
This is the file name of the output 'augmented' light curve catalog
pickle file that will be written.
infokeys : list of tuples
This is a list of keys to extract from the checkplot and some info on
how this extraction is to be done. Each key entry is a six-element
tuple of the following form:
- key name in the checkplot
- numpy dtype of the value of this key
- False if key is associated with a magcol or True otherwise
        - True if subsequent updates to the same column name will overwrite the
          existing key value in the output augmented light curve catalog, or
          False if they will append to the existing key values
- character to use to substitute a None value of the key in the
checkplot in the output light curve catalog column
- character to use to substitute a nan value of the key in the
checkplot in the output light curve catalog column
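        For example, the default entry
        ('varinfo.features.stetsonj', np.float_, False, True, np.nan, np.nan)
        extracts the per-magcol Stetson J index as a float column, overwrites
        any existing column value, and substitutes NaN when the checkplot value
        is None or nan.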
        See the `CPINFO_DEFAULTKEYS` list above for more examples.
nworkers : int
The number of parallel workers to launch to extract checkplot
information.
Returns
-------
str
Returns the path to the generated 'augmented' light curve catalog pickle
file.
'''
# get the checkplots from the directory if one is provided
if not isinstance(checkplots, list) and os.path.exists(checkplots):
checkplots = sorted(glob.glob(os.path.join(checkplots, checkplotglob)))
tasklist = [(cpf, infokeys) for cpf in checkplots]
with ProcessPoolExecutor(max_workers=nworkers) as executor:
resultfutures = executor.map(_cpinfo_key_worker, tasklist)
results = list(resultfutures)
executor.shutdown()
# now that we have all the checkplot info, we need to match to the
# objectlist in the lclist
# open the lclist
with open(initial_lc_catalog,'rb') as infd:
lc_catalog = pickle.load(infd)
# convert the lc_catalog['columns'] item to a list if it's not
# this is so we can append columns to it later
lc_catalog['columns'] = list(lc_catalog['columns'])
catalog_objectids = np.array(lc_catalog['objects']['objectid'])
checkplot_objectids = np.array([x[0] for x in results])
# add the extra key arrays in the lclist dict
extrainfokeys = []
actualkeys = []
# set up the extrainfokeys list
for keyspec in infokeys:
key, dtype, firstlevel, overwrite_append, nonesub, nansub = keyspec
if firstlevel:
eik = key
else:
eik = '%s.%s' % (magcol, key)
extrainfokeys.append(eik)
# now handle the output dicts and column list
eactual = eik.split('.')
# this handles dereferenced list indices
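        # e.g. 'objectinfo.gaia_ids.0' ends in a digit and maps to the column
        # name 'gaia_id' below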
if not eactual[-1].isdigit():
if not firstlevel:
eactual = '.'.join([eactual[0], eactual[-1]])
else:
eactual = eactual[-1]
else:
elastkey = eactual[-2]
# for list columns, this converts stuff like errs -> err,
# and parallaxes -> parallax
if elastkey.endswith('es'):
elastkey = elastkey[:-2]
elif elastkey.endswith('s'):
elastkey = elastkey[:-1]
if not firstlevel:
eactual = '.'.join([eactual[0], elastkey])
else:
eactual = elastkey
actualkeys.append(eactual)
# add a new column only if required
if eactual not in lc_catalog['columns']:
lc_catalog['columns'].append(eactual)
# we'll overwrite earlier existing columns in any case
lc_catalog['objects'][eactual] = []
# now go through each objectid in the catalog and add the extra keys to
# their respective arrays
for catobj in tqdm(catalog_objectids):
cp_objind = np.where(checkplot_objectids == catobj)
if len(cp_objind[0]) > 0:
# get the info line for this checkplot
thiscpinfo = results[cp_objind[0][0]]
# the first element is the objectid which we remove
thiscpinfo = thiscpinfo[1:]
# update the object catalog entries for this object
for ekind, ek in enumerate(actualkeys):
# add the actual thing to the output list
lc_catalog['objects'][ek].append(
thiscpinfo[ekind]
)
else:
# update the object catalog entries for this object
for ekind, ek in enumerate(actualkeys):
thiskeyspec = infokeys[ekind]
nonesub = thiskeyspec[-2]
lc_catalog['objects'][ek].append(
nonesub
)
# now we should have all the new keys in the object catalog
# turn them into arrays
for ek in actualkeys:
lc_catalog['objects'][ek] = np.array(
lc_catalog['objects'][ek]
)
# add the magcol to the lc_catalog
if 'magcols' in lc_catalog:
if magcol not in lc_catalog['magcols']:
lc_catalog['magcols'].append(magcol)
else:
lc_catalog['magcols'] = [magcol]
# write back the new object catalog
with open(outfile, 'wb') as outfd:
pickle.dump(lc_catalog, outfd, protocol=pickle.HIGHEST_PROTOCOL)
return outfile
| mit |
nikitasingh981/scikit-learn | examples/gaussian_process/plot_compare_gpr_krr.py | 84 | 5205 | """
==========================================================
Comparison of kernel ridge and Gaussian process regression
==========================================================
Both kernel ridge regression (KRR) and Gaussian process regression (GPR) learn
a target function by employing internally the "kernel trick". KRR learns a
linear function in the space induced by the respective kernel which corresponds
to a non-linear function in the original space. The linear function in the
kernel space is chosen based on the mean-squared error loss with
ridge regularization. GPR uses the kernel to define the covariance of
a prior distribution over the target functions and uses the observed training
data to define a likelihood function. Based on Bayes' theorem, a (Gaussian)
posterior distribution over target functions is defined, whose mean is used
for prediction.
A major difference is that GPR can choose the kernel's hyperparameters based
on gradient-ascent on the marginal likelihood function while KRR needs to
perform a grid search on a cross-validated loss function (mean-squared error
loss). A further difference is that GPR learns a generative, probabilistic
model of the target function and can thus provide meaningful confidence
intervals and posterior samples along with the predictions while KRR only
provides predictions.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise. The figure compares
the learned model of KRR and GPR based on an ExpSineSquared kernel, which is
suited for learning periodic functions. The kernel's hyperparameters control
the smoothness (l) and periodicity of the kernel (p). Moreover, the noise level
of the data is learned explicitly by GPR via an additional WhiteKernel component
in the kernel and by the regularization parameter alpha of KRR.
The figure shows that both methods learn reasonable models of the target
function. GPR correctly identifies the periodicity of the function to be
roughly 2*pi (6.28), while KRR chooses the doubled periodicity 4*pi. Besides
that, GPR provides reasonable confidence bounds on the prediction which are not
available for KRR. A major difference between the two methods is the time
required for fitting and predicting: while fitting KRR is fast in principle,
the grid-search for hyperparameter optimization scales exponentially with the
number of hyperparameters ("curse of dimensionality"). The gradient-based
optimization of the parameters in GPR does not suffer from this exponential
scaling and is thus considerably faster on this example with its 3-dimensional
hyperparameter space. The time for predicting is similar; however, generating
the variance of the predictive distribution of GPR takes considerably longer
than just predicting the mean.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import WhiteKernel, ExpSineSquared
rng = np.random.RandomState(0)
# Generate sample data
X = 15 * rng.rand(100, 1)
y = np.sin(X).ravel()
y += 3 * (0.5 - rng.rand(X.shape[0])) # add noise
# Fit KernelRidge with parameter selection based on 5-fold cross validation
param_grid = {"alpha": [1e0, 1e-1, 1e-2, 1e-3],
"kernel": [ExpSineSquared(l, p)
for l in np.logspace(-2, 2, 10)
for p in np.logspace(0, 2, 10)]}
kr = GridSearchCV(KernelRidge(), cv=5, param_grid=param_grid)
stime = time.time()
kr.fit(X, y)
print("Time for KRR fitting: %.3f" % (time.time() - stime))
gp_kernel = ExpSineSquared(1.0, 5.0, periodicity_bounds=(1e-2, 1e1)) \
+ WhiteKernel(1e-1)
gpr = GaussianProcessRegressor(kernel=gp_kernel)
stime = time.time()
gpr.fit(X, y)
print("Time for GPR fitting: %.3f" % (time.time() - stime))
# Predict using kernel ridge
X_plot = np.linspace(0, 20, 10000)[:, None]
stime = time.time()
y_kr = kr.predict(X_plot)
print("Time for KRR prediction: %.3f" % (time.time() - stime))
# Predict using gaussian process regressor
stime = time.time()
y_gpr = gpr.predict(X_plot, return_std=False)
print("Time for GPR prediction: %.3f" % (time.time() - stime))
stime = time.time()
y_gpr, y_std = gpr.predict(X_plot, return_std=True)
print("Time for GPR prediction with standard-deviation: %.3f"
% (time.time() - stime))
# Plot results
plt.figure(figsize=(10, 5))
lw = 2
plt.scatter(X, y, c='k', label='data')
plt.plot(X_plot, np.sin(X_plot), color='navy', lw=lw, label='True')
plt.plot(X_plot, y_kr, color='turquoise', lw=lw,
label='KRR (%s)' % kr.best_params_)
plt.plot(X_plot, y_gpr, color='darkorange', lw=lw,
label='GPR (%s)' % gpr.kernel_)
plt.fill_between(X_plot[:, 0], y_gpr - y_std, y_gpr + y_std, color='darkorange',
alpha=0.2)
plt.xlabel('data')
plt.ylabel('target')
plt.xlim(0, 20)
plt.ylim(-4, 4)
plt.title('GPR versus Kernel Ridge')
plt.legend(loc="best", scatterpoints=1, prop={'size': 8})
plt.show()
| bsd-3-clause |
debasishdebs/Scarlett-PA | GreyMatter/send_sms.py | 1 | 2840 | # Currently supports:
# 1:Whatsapp
# 2: Messenger
# 3: SMS
import pandas as pd
# from whatsapp import Client
import config as cfg
from fbchat import Client
from pushbullet import Pushbullet
import creds as cr
class ContactDriver(object):
def __init__(self):
self.__driver__()
pass
def __driver__(self):
self.contacts = self.__read_contacts_()
pass
def __read_contacts_(self):
import os
df = pd.read_csv(os.path.abspath(cfg.CONTACTS_DUMP_CSV))
return df
def fetch_contact(self, contactName):
contact_details = self.contacts[self.contacts['NAME'] == contactName][['NAME', 'PHONE', 'EMAIL']]
return contact_details
class SendWhatsapp(object):
def __init__(self, contactName):
self.nameToSearch = contactName
pass
def __driver_(self):
self.contactObj = ContactDriver()
pass
def __initialize_client_(self):
self.client = Client()
pass
def __fetch_number_(self):
contact_number = self.contactObj.fetch_contact(self.nameToSearch)
return contact_number
def send(self, message):
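        # NOTE: sending via Whatsapp is not implemented yet; __driver_() and
        # __initialize_client_() above are never called, so this method is a stub.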
return
class SendMessenger(object):
def __init__(self):
self.__driver_()
pass
    def __initialize_client_(self):
self.client = Client("[email protected]", "2NInitu!1")
pass
def __deinitialize_client_(self):
self.client.logout()
pass
def send(self, message, contactName):
try:
user = self.client.searchForUsers(contactName)[0]
uid = user.uid
self.client.sendMessage(message, thread_id=uid)
self.__deinitialize_client_()
return True
        except Exception:
return False
pass
def __driver_(self):
        self.__initialize_client_()
users = self.client.fetchAllUsers()
pass
class SendSMS(object):
def __init__(self):
self.__driver_()
pass
    def __initialize_client_(self):
self.client = Pushbullet(cr.PUSHBULLET_API_KEY)
pass
def __driver_(self):
self.contactObj = ContactDriver()
        self.__initialize_client_()
pass
def send(self, message, contactName):
contact_number = self.contactObj.fetch_contact(contactName)
print(contact_number)
number = str(contact_number['PHONE'].values[0])
number = number.replace(" ", "")
print(number)
device = self.client.devices[0]
self.client.push_sms(device, number, message)
pass
if __name__ == '__main__':
# obj = SendMessenger()
# obj.send("Hi. This is check. I'm texting u using my JARVIS, i.e. Scarlett!", "Ayushi Verma")
obj = SendSMS()
obj.send("Hi. This is test. Sending SMS from Scarlett!", "Ayushi Verma")
| mit |
apbard/OrthogonalProjection | orthoproj/orthogonal_projection.py | 1 | 15230 | # Copyright (c) 2016 Alessandro Pietro Bardelli
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Module containing OrthoProj, a class which can create an Orthogonal Projection
of 3D data with full axes synchronisation.
"""
# pylint: disable=undefined-variable, invalid-name, eval-used
import types
import itertools
from textwrap import dedent
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from matplotlib.collections import PolyCollection
from six.moves import input
# This generates all sync_A_with_B(self, axis) functions that are used to
# synchronise the axis of two plots
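# e.g. the generated sync_x_with_y(self, axis) copies axis's y-limits onto
# self's x-limits, flipping them when the two axes run in opposite directions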
for src, trg in itertools.product(['x', 'y', 'z'], repeat=2):
eval(compile(dedent("""
def sync_{0}_with_{1}(self, axis):
if((axis.get_{1}lim()[0] > axis.get_{1}lim()[1]) !=
(self.get_{0}lim()[0] > self.get_{0}lim()[1])):
self.set_{0}lim(axis.get_{1}lim()[::-1], emit=False)
else:
self.set_{0}lim(axis.get_{1}lim(), emit=False)
""").format(src, trg), '<string>', 'exec'))
def _merge_dicts(*dict_args):
'''
Given any number of dicts, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dicts.
'''
result = {}
for dictionary in dict_args:
if dictionary is not None:
result.update(dictionary)
return result
class OrthoProj():
"""
Orthogonal Projection object.
"""
_fig = None
_locked = None
_axisXZ = None
_axisYZ = None
_axisXY = None
_axis3D = None
def __init__(self, title=None):
"""
Build an :class:`OrthoProj` object
Args:
title (string). The title string for the orthogonal projection figure.
Default: None, i.e., default naming
"""
fig = plt.figure(title)
axisXZ = fig.add_subplot(221, title="Vertical Plane - XZ")
axisYZ = fig.add_subplot(222, title="Lateral Plane - YZ")
axisXY = fig.add_subplot(223, title="Horizontal Plane - XY")
axis3D = fig.add_subplot(224, title="3D view - XYZ", projection="3d")
for ax in [axisXZ, axisYZ, axisXY, axis3D]:
ax.sync_x_with_x = types.MethodType(sync_x_with_x, ax)
ax.sync_x_with_y = types.MethodType(sync_x_with_y, ax)
ax.sync_x_with_z = types.MethodType(sync_x_with_z, ax)
ax.sync_y_with_x = types.MethodType(sync_y_with_x, ax)
ax.sync_y_with_y = types.MethodType(sync_y_with_y, ax)
ax.sync_y_with_z = types.MethodType(sync_y_with_z, ax)
axis3D.sync_z_with_x = types.MethodType(sync_z_with_x, axis3D)
axis3D.sync_z_with_y = types.MethodType(sync_z_with_y, axis3D)
axis3D.sync_z_with_z = types.MethodType(sync_z_with_z, axis3D)
# Connect XY subplot
axisXY.callbacks.connect('xlim_changed', axisXZ.sync_x_with_x)
axisXY.callbacks.connect('xlim_changed', axis3D.sync_x_with_x)
axisXY.callbacks.connect('ylim_changed', axisYZ.sync_x_with_y)
axisXY.callbacks.connect('ylim_changed', axis3D.sync_y_with_y)
# Connect XZ subplot
axisXZ.callbacks.connect('xlim_changed', axisXY.sync_x_with_x)
axisXZ.callbacks.connect('xlim_changed', axis3D.sync_x_with_x)
axisXZ.callbacks.connect('ylim_changed', axisYZ.sync_y_with_y)
axisXZ.callbacks.connect('ylim_changed', axis3D.sync_z_with_y)
# Connect YZ subplot
axisYZ.callbacks.connect('xlim_changed', axisXY.sync_y_with_x)
axisYZ.callbacks.connect('xlim_changed', axis3D.sync_y_with_x)
axisYZ.callbacks.connect('ylim_changed', axisXZ.sync_y_with_y)
axisYZ.callbacks.connect('ylim_changed', axis3D.sync_z_with_y)
# Connect 3D subplot
axis3D.callbacks.connect('xlim_changed', axisXY.sync_x_with_x)
axis3D.callbacks.connect('xlim_changed', axisXZ.sync_x_with_x)
axis3D.callbacks.connect('ylim_changed', axisXY.sync_y_with_y)
axis3D.callbacks.connect('ylim_changed', axisYZ.sync_x_with_y)
axis3D.callbacks.connect('zlim_changed', axisXZ.sync_y_with_z)
axis3D.callbacks.connect('zlim_changed', axisYZ.sync_y_with_z)
# invert the x axis in the YX subplot
axisYZ.invert_xaxis()
# set labels for 3D plot
axis3D.set_xlabel('X axis')
axis3D.set_ylabel('Y axis')
axis3D.set_zlabel('Z axis')
self._fig = fig
self._axisXZ = axisXZ
self._axisYZ = axisYZ
self._axisXY = axisXY
self._axis3D = axis3D
def plot(self, x, y, z, kwargsXZ=None, kwargsYZ=None, kwargsXY=None,
kwargs3D=None, kwargsShared=None):
'''
        Plot a line.
Args:
x, y, z (1D array). Positions of data points.
kwargsXZ, kwargsYZ, kwargsXY, kwargs3D (dictionary). Extra keyword
arguments to be passed to the single plotting functions.
                Internally :func:`~mpl_toolkits.mplot3d.art3d.Axes3D.plot`
is used for the 3D plot and standard
:func:`~matplotlib.axes.Axes.plot` for 2D plots.
kwargsShared (dictionary). Extra keyword arguments common to all plots.
Arguments specified via specific kwargs will always have the
precedence and won't be overwritten.
'''
kwargsXZ = _merge_dicts(kwargsShared, kwargsXZ)
kwargsYZ = _merge_dicts(kwargsShared, kwargsYZ)
kwargsXY = _merge_dicts(kwargsShared, kwargsXY)
kwargs3D = _merge_dicts(kwargsShared, kwargs3D)
self._plot2DGraphs(x, y, z, kwargsXZ, kwargsYZ, kwargsXY)
if kwargs3D is None:
self._axis3D.plot(x, y, z)
else:
self._axis3D.plot(x, y, z, **kwargs3D)
def scatter(self, x, y, z, kwargsXZ=None, kwargsYZ=None, kwargsXY=None,
kwargs3D=None, kwargsShared=None):
'''
Plot a scatter.
Args:
x, y, z (1D array). Positions of data points.
kwargsXZ, kwargsYZ, kwargsXY, kwargs3D (dictionary). Extra keyword
arguments to be passed to the single plotting functions.
Internally :func:`~mpl_toolkits.mplot3d.art3d.Axes3D.scatter`
is used for the 3D plot and standard
:func:`~matplotlib.axes.Axes.scatter` for 2D plots.
kwargsShared (dictionary). Extra keyword arguments common to all plots.
Arguments specified via specific kwargs will always have the
precedence and won't be overwritten.
'''
kwargsXZ = _merge_dicts(kwargsShared, kwargsXZ)
kwargsYZ = _merge_dicts(kwargsShared, kwargsYZ)
kwargsXY = _merge_dicts(kwargsShared, kwargsXY)
kwargs3D = _merge_dicts(kwargsShared, kwargs3D)
self._scatter2DGraphs(x, y, z, kwargsXZ, kwargsYZ, kwargsXY)
if kwargs3D is None:
self._axis3D.scatter(x, y, z)
else:
self._axis3D.scatter(x, y, z, **kwargs3D)
def plot_trisurf(self, x, y, z, kwargsXZ=None, kwargsYZ=None, kwargsXY=None,
kwargs3D=None, kwargsShared=None):
'''
Plot a trisurf
'''
raise NotImplementedError("plot_trisurf: Not Implemented Yet")
def plot_surface(self, X, Y, Z, kwargsXZ=None, kwargsYZ=None, kwargsXY=None,
kwargs3D=None, kwargsShared=None):
'''
Plot a surface.
Args:
X, Y, Z (2D array). Data values as 2D arrays.
kwargsXZ, kwargsYZ, kwargsXY, kwargs3D (dictionary). Extra keyword
arguments to be passed to the single plotting functions.
Internally :func:`~mpl_toolkits.mplot3d.art3d.Axes3D.plot_surface`
is used for the 3D plot and standard :func:`~matplotlib.axes.Axes.plot`
for 2D plots.
kwargsShared (dictionary). Extra keyword arguments common to all plots.
Arguments specified via specific kwargs will always have the
precedence and won't be overwritten.
'''
kwargsXZ = _merge_dicts(kwargsShared, kwargsXZ)
kwargsYZ = _merge_dicts(kwargsShared, kwargsYZ)
kwargsXY = _merge_dicts(kwargsShared, kwargsXY)
kwargs3D = _merge_dicts(kwargsShared, kwargs3D)
self._plot2DGraphs(X, Y, Z, kwargsXZ, kwargsYZ, kwargsXY)
self._plot2DGraphs(X.T, Y.T, Z.T, kwargsXZ, kwargsYZ, kwargsXY)
if kwargs3D is None:
self._axis3D.plot_surface(X, Y, Z)
else:
self._axis3D.plot_surface(X, Y, Z, **kwargs3D)
def plot_wireframe(self, X, Y, Z, kwargsXZ=None, kwargsYZ=None, kwargsXY=None,
kwargs3D=None, kwargsShared=None):
'''
Plot a wireframe.
Args:
X, Y, Z (2D array). Data values as 2D arrays.
kwargsXZ, kwargsYZ, kwargsXY, kwargs3D (dictionary). Extra keyword
arguments to be passed to the single plotting functions.
Internally :func:`~mpl_toolkits.mplot3d.art3d.Axes3D.plot_wireframe`
is used for the 3D plot and standard :func:`~matplotlib.axes.Axes.plot`
for 2D plots.
kwargsShared (dictionary). Extra keyword arguments common to all plots.
Arguments specified via specific kwargs will always have the
precedence and won't be overwritten.
'''
kwargsXZ = _merge_dicts(kwargsShared, kwargsXZ)
kwargsYZ = _merge_dicts(kwargsShared, kwargsYZ)
kwargsXY = _merge_dicts(kwargsShared, kwargsXY)
kwargs3D = _merge_dicts(kwargsShared, kwargs3D)
self._plot2DGraphs(X, Y, Z, kwargsXZ, kwargsYZ, kwargsXY)
self._plot2DGraphs(X.T, Y.T, Z.T, kwargsXZ, kwargsYZ, kwargsXY)
if kwargs3D is None:
self._axis3D.plot_wireframe(X, Y, Z)
else:
self._axis3D.plot_wireframe(X, Y, Z, **kwargs3D)
def plot_collection(self, x, y, z, kwargsXZ=None, kwargsYZ=None, kwargsXY=None,
kwargs3D=None, kwargsShared=None):
'''
Plot a collection.
Args:
x, y, z (1D array). Arrays containing the vertices of the collection object.
Internally :func:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
and :func:`~matplotlib.collections.PolyCollection` are used
to create the collections.
kwargsXZ, kwargsYZ, kwargsXY, kwargs3D (dictionary).
Extra keyword arguments to be passed to the single plotting
functions. Internally :func:`add_collection3d` and
:func:`add_collection` are called.
kwargsShared (dictionary). Extra keyword arguments common to all plots.
Arguments specified via specific kwargs will always have the
precedence and won't be overwritten.
'''
kwargsXZ = _merge_dicts(kwargsShared, kwargsXZ)
kwargsYZ = _merge_dicts(kwargsShared, kwargsYZ)
kwargsXY = _merge_dicts(kwargsShared, kwargsXY)
kwargs3D = _merge_dicts(kwargsShared, kwargs3D)
self._collection2DGraphs(x, y, z, kwargsXZ, kwargsYZ, kwargsXY)
verts = [list(zip(x, y, z))]
if kwargs3D is None:
self._axis3D.add_collection3d(Poly3DCollection(verts))
else:
self._axis3D.add_collection3d(Poly3DCollection(verts, **kwargs3D))
def show(self, block=False):
"""
Display the figure.
Args:
block (bool). If True the computation is blocked waiting for
user's input. Default: False
"""
self._fig.show()
if block:
input("Press any key to continue")
# ###############
# Private Methods
# ###############
def _plot2DGraphs(self, x, y, z, kwargsXZ=None, kwargsYZ=None, kwargsXY=None):
"""
Function that plot data on the 2D axis as a simple plot
"""
if kwargsXZ is None:
self._axisXZ.plot(x, z)
else:
self._axisXZ.plot(x, z, **kwargsXZ)
if kwargsYZ is None:
self._axisYZ.plot(y, z)
else:
self._axisYZ.plot(y, z, **kwargsYZ)
if kwargsXY is None:
self._axisXY.plot(x, y)
else:
self._axisXY.plot(x, y, **kwargsXY)
def _scatter2DGraphs(self, x, y, z, kwargsXZ=None, kwargsYZ=None, kwargsXY=None):
"""
Function that plot data on the 2D axis as a scatter plot
"""
if kwargsXZ is None:
self._axisXZ.scatter(x, z)
else:
self._axisXZ.scatter(x, z, **kwargsXZ)
if kwargsYZ is None:
self._axisYZ.scatter(y, z)
else:
self._axisYZ.scatter(y, z, **kwargsYZ)
if kwargsXY is None:
self._axisXY.scatter(x, y)
else:
self._axisXY.scatter(x, y, **kwargsXY)
def _collection2DGraphs(self, x, y, z, kwargsXZ=None, kwargsYZ=None, kwargsXY=None):
"""
Function that plot data on the 2D axis as collections
"""
vertxy = [list(zip(x, y))]
vertxz = [list(zip(x, z))]
vertyz = [list(zip(y, z))]
if kwargsXY is None:
self._axisXY.add_collection(PolyCollection(vertxy))
else:
self._axisXY.add_collection(PolyCollection(vertxy, **kwargsXY))
if kwargsXZ is None:
self._axisXZ.add_collection(PolyCollection(vertxz))
else:
self._axisXZ.add_collection(PolyCollection(vertxz, **kwargsXZ))
if kwargsYZ is None:
self._axisYZ.add_collection(PolyCollection(vertyz))
else:
self._axisYZ.add_collection(PolyCollection(vertyz, **kwargsYZ))
| mit |
wwf5067/statsmodels | statsmodels/tsa/tests/test_seasonal.py | 27 | 9216 | import numpy as np
from numpy.testing import assert_almost_equal, assert_equal, assert_raises
from statsmodels.tsa.seasonal import seasonal_decompose
from pandas import DataFrame, DatetimeIndex
class TestDecompose:
@classmethod
def setupClass(cls):
# even
data = [-50, 175, 149, 214, 247, 237, 225, 329, 729, 809,
530, 489, 540, 457, 195, 176, 337, 239, 128, 102,
232, 429, 3, 98, 43, -141, -77, -13, 125, 361, -45, 184]
cls.data = DataFrame(data, DatetimeIndex(start='1/1/1951',
periods=len(data),
freq='Q'))
def test_ndarray(self):
res_add = seasonal_decompose(self.data.values, freq=4)
seasonal = [62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25, 62.46, 86.17,
-88.38, -60.25, 62.46, 86.17, -88.38, -60.25,
62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25]
trend = [np.nan, np.nan, 159.12, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 72.00, -9.25, -33.12, -36.75, 36.25,
103.00, 131.62, np.nan, np.nan]
random = [np.nan, np.nan, 78.254, 70.254, -36.710, -94.299, -6.371,
-62.246, 105.415, 103.576, 2.754, 1.254, 15.415, -10.299,
-33.246, -27.746, 46.165, -57.924, 28.004, -36.746,
-37.585, 151.826, -75.496, 86.254, -10.210, -194.049,
48.129, 11.004, -40.460, 143.201, np.nan, np.nan]
assert_almost_equal(res_add.seasonal, seasonal, 2)
assert_almost_equal(res_add.trend, trend, 2)
assert_almost_equal(res_add.resid, random, 3)
res_mult = seasonal_decompose(np.abs(self.data.values), 'm', freq=4)
seasonal = [1.0815, 1.5538, 0.6716, 0.6931, 1.0815, 1.5538, 0.6716,
0.6931, 1.0815, 1.5538, 0.6716, 0.6931, 1.0815, 1.5538,
0.6716, 0.6931, 1.0815, 1.5538, 0.6716, 0.6931, 1.0815,
1.5538, 0.6716, 0.6931, 1.0815, 1.5538, 0.6716, 0.6931,
1.0815, 1.5538, 0.6716, 0.6931]
trend = [np.nan, np.nan, 171.62, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 107.25, 80.50, 79.12, 78.75, 116.50,
140.00, 157.38, np.nan, np.nan]
random = [np.nan, np.nan, 1.29263, 1.51360, 1.03223, 0.62226,
1.04771, 1.05139, 1.20124, 0.84080, 1.28182, 1.28752,
1.08043, 0.77172, 0.91697, 0.96191, 1.36441, 0.72986,
1.01171, 0.73956, 1.03566, 1.44556, 0.02677, 1.31843,
0.49390, 1.14688, 1.45582, 0.16101, 0.82555, 1.47633,
np.nan, np.nan]
assert_almost_equal(res_mult.seasonal, seasonal, 4)
assert_almost_equal(res_mult.trend, trend, 2)
assert_almost_equal(res_mult.resid, random, 4)
# test odd
res_add = seasonal_decompose(self.data.values[:-1], freq=4)
seasonal = [68.18, 69.02, -82.66, -54.54, 68.18, 69.02, -82.66,
-54.54, 68.18, 69.02, -82.66, -54.54, 68.18, 69.02,
-82.66, -54.54, 68.18, 69.02, -82.66, -54.54, 68.18,
69.02, -82.66, -54.54, 68.18, 69.02, -82.66, -54.54,
68.18, 69.02, -82.66]
trend = [np.nan, np.nan, 159.12, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 72.00, -9.25, -33.12, -36.75, 36.25,
103.00, np.nan, np.nan]
random = [np.nan, np.nan, 72.538, 64.538, -42.426, -77.150,
-12.087, -67.962, 99.699, 120.725, -2.962, -4.462,
9.699, 6.850, -38.962, -33.462, 40.449, -40.775, 22.288,
-42.462, -43.301, 168.975, -81.212, 80.538, -15.926,
-176.900, 42.413, 5.288, -46.176, np.nan, np.nan]
assert_almost_equal(res_add.seasonal, seasonal, 2)
assert_almost_equal(res_add.trend, trend, 2)
assert_almost_equal(res_add.resid, random, 3)
def test_pandas(self):
res_add = seasonal_decompose(self.data, freq=4)
seasonal = [62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25, 62.46, 86.17,
-88.38, -60.25, 62.46, 86.17, -88.38, -60.25,
62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25]
trend = [np.nan, np.nan, 159.12, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 72.00, -9.25, -33.12, -36.75, 36.25,
103.00, 131.62, np.nan, np.nan]
random = [np.nan, np.nan, 78.254, 70.254, -36.710, -94.299, -6.371,
-62.246, 105.415, 103.576, 2.754, 1.254, 15.415, -10.299,
-33.246, -27.746, 46.165, -57.924, 28.004, -36.746,
-37.585, 151.826, -75.496, 86.254, -10.210, -194.049,
48.129, 11.004, -40.460, 143.201, np.nan, np.nan]
assert_almost_equal(res_add.seasonal.values.squeeze(), seasonal, 2)
assert_almost_equal(res_add.trend.values.squeeze(), trend, 2)
assert_almost_equal(res_add.resid.values.squeeze(), random, 3)
assert_equal(res_add.seasonal.index.values.squeeze(),
self.data.index.values)
res_mult = seasonal_decompose(np.abs(self.data), 'm', freq=4)
seasonal = [1.0815, 1.5538, 0.6716, 0.6931, 1.0815, 1.5538, 0.6716,
0.6931, 1.0815, 1.5538, 0.6716, 0.6931, 1.0815, 1.5538,
0.6716, 0.6931, 1.0815, 1.5538, 0.6716, 0.6931, 1.0815,
1.5538, 0.6716, 0.6931, 1.0815, 1.5538, 0.6716, 0.6931,
1.0815, 1.5538, 0.6716, 0.6931]
trend = [np.nan, np.nan, 171.62, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 107.25, 80.50, 79.12, 78.75, 116.50,
140.00, 157.38, np.nan, np.nan]
random = [np.nan, np.nan, 1.29263, 1.51360, 1.03223, 0.62226,
1.04771, 1.05139, 1.20124, 0.84080, 1.28182, 1.28752,
1.08043, 0.77172, 0.91697, 0.96191, 1.36441, 0.72986,
1.01171, 0.73956, 1.03566, 1.44556, 0.02677, 1.31843,
0.49390, 1.14688, 1.45582, 0.16101, 0.82555, 1.47633,
np.nan, np.nan]
assert_almost_equal(res_mult.seasonal.values.squeeze(), seasonal, 4)
assert_almost_equal(res_mult.trend.values.squeeze(), trend, 2)
assert_almost_equal(res_mult.resid.values.squeeze(), random, 4)
assert_equal(res_mult.seasonal.index.values.squeeze(),
self.data.index.values)
def test_filt(self):
filt = np.array([1/8., 1/4., 1./4, 1/4., 1/8.])
res_add = seasonal_decompose(self.data.values, filt=filt, freq=4)
seasonal = [62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25, 62.46, 86.17,
-88.38, -60.25, 62.46, 86.17, -88.38, -60.25,
62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25]
trend = [np.nan, np.nan, 159.12, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 72.00, -9.25, -33.12, -36.75, 36.25,
103.00, 131.62, np.nan, np.nan]
random = [np.nan, np.nan, 78.254, 70.254, -36.710, -94.299, -6.371,
-62.246, 105.415, 103.576, 2.754, 1.254, 15.415, -10.299,
-33.246, -27.746, 46.165, -57.924, 28.004, -36.746,
-37.585, 151.826, -75.496, 86.254, -10.210, -194.049,
48.129, 11.004, -40.460, 143.201, np.nan, np.nan]
assert_almost_equal(res_add.seasonal, seasonal, 2)
assert_almost_equal(res_add.trend, trend, 2)
assert_almost_equal(res_add.resid, random, 3)
def test_raises(self):
assert_raises(ValueError, seasonal_decompose, self.data.values)
assert_raises(ValueError, seasonal_decompose, self.data, 'm',
freq=4)
x = self.data.astype(float).copy()
x.ix[2] = np.nan
assert_raises(ValueError, seasonal_decompose, x)
| bsd-3-clause |
WangWenjun559/Weiss | summary/sumy/sklearn/ensemble/__init__.py | 217 | 1307 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| apache-2.0 |
yunfanz/ReionBub | Choud14/inttest.py | 1 | 6355 | import numpy as n, matplotlib.pyplot as p, scipy.special
import cosmolopy.perturbation as pb
import cosmolopy.density as cd
from scipy.integrate import quad, tplquad
import itertools
from scipy.interpolate import interp1d
from scipy.interpolate import RectBivariateSpline as RBS
import optparse, sys
# o = optparse.OptionParser()
# o.add_option('-d','--del0', dest='del0', default=5.)
# o.add_option('-m','--mul', dest='mul', default=1.)
# o.add_option('-z','--red', dest='red', default=12.)
# opts,args = o.parse_args(sys.argv[1:])
# print opts, args
Om,sig8,ns,h,Ob = 0.315, 0.829, 0.96, 0.673, 0.0487
cosmo = {'baryonic_effects':True,'omega_k_0':0,'omega_M_0':0.315, 'omega_b_0':0.0487, 'n':0.96, 'N_nu':0, 'omega_lambda_0':0.685,'omega_n_0':0., 'sigma_8':0.829,'h':0.673}
def m2R(m):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
RL = (3*m/4/n.pi/rhobar)**(1./3)
return RL
def m2V(m):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
return m/rhobar
def R2m(RL):
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
m = 4*n.pi/3*rhobar*RL**3
return m
def mmin(z,Tvir=1.E4):
return pb.virial_mass(Tvir,z,**cosmo)
def RG(RL): return 0.46*RL
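# W(y) is the Fourier transform of a real-space spherical top-hat window;
# WG(y) is the corresponding Gaussian window.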
def W(y): return 3/y**3*(n.sin(y)-y*n.cos(y))
def WG(y): return n.exp(-y**2/2)
def Del2k(k):
Pk = pb.power_spectrum(k,0.,**cosmo)
Del2k = k**3*Pk/2/n.pi**2
#fgrowth = pb.fgrowth(z, cosmo['omega_M_0'])
#Del2k0 = Del2k/fgrowth**2#*pb.norm_power(**cosmo)
return Del2k
#def sig0(RL,Del2k):
# return n.sum(Del2k**2*W(RL*k)**2)*(logk[1]-logk[0])
#def sig0(RL,Del2k):
# return n.sum(Del2k**2*W(RL*k)**2/k)*(k[1]-k[0])
def polyval2d(x, y, m):
order = int(n.sqrt(len(m))) - 1
ij = itertools.product(range(order+1), range(order+1))
z = n.zeros_like(x)
for a, (i,j) in zip(m, ij):
z += a * x**i * y**j
return z
def sig0test(RL,kmax):
return quad(lambda k: Del2k(k)*W(RL*k)**2/k, 0, kmax)[0] #z=0 extrapolated to present
def sig0(RL):
return (pb.sigma_r(RL,0.,**cosmo)[0])**2
def sigG(RL,j):
return (pb.sigma_j(RL,j,0.,**cosmo)[0])**2
dsig1m = n.load('sig1m.npz')
sig1mRl,sig1marr = dsig1m['arr_0'],dsig1m['arr_1']
fs1m = interp1d(sig1mRl,sig1marr,kind='cubic') #interpolated from 0.04 to 100000
def sig1m(RL):
return fs1m(RL)
dSX = n.load('logSX.npz')
lSXRl,lSXR0,arrSX = dSX['arr_0'],dSX['arr_1'],dSX['arr_2']
fSX = RBS(lSXRl,lSXR0,arrSX)
def SX(RL,R0):
res = fSX(n.log(RL),n.log(R0))
if res.size > 1: print 'Warning: SX called with array instead of single number'
return res[0][0]
ds1mX = n.load('logsig1mX.npz')
ls1mXRl,ls1mXR0,arrs1mX = ds1mX['arr_0'],ds1mX['arr_1'],ds1mX['arr_2']
fs1mX = RBS(ls1mXRl,ls1mXR0,arrs1mX)
def sig1mX(RL,R0):
res = fs1mX(n.log(RL),n.log(R0))
if res.size > 1: print 'Warning: s1mX called with array instead of single number'
return res[0][0]
def gam(RL):
return sig1m(RL)/n.sqrt(sig0(RL)*sigG(RL,2))
def Vstar(RL):
return (6*n.pi)**1.5*(sigG(RL,1)/sigG(RL,2))**3
def erf(x):
return scipy.special.erf(x)
def prob(x,av=0.5,var=0.25):
return 1/n.sqrt(2*n.pi*var)/x*n.exp(-(n.log(x)-av)**2/2/var)
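# F(x) is the peak curvature function from BBKS-style peak theory, used in the
# excursion-set-peaks integrands below.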
def F(x):
return (x**3-3*x)/2*(erf(x*n.sqrt(5./2))+erf(x*n.sqrt(5./8)))+n.sqrt(2./5/n.pi)*((31*x**2/4+8./5)*n.exp(-5.*x**2/8)+(x**2/2-8./5)*n.exp(-5.*x**2/2))
def Deltac(z):
fgrowth = pb.fgrowth(z, cosmo['omega_M_0']) # = D(z)/D(0)
return 1.686/fgrowth
def pG(y,av,var):
return 1/n.sqrt(2*n.pi*var)*n.exp(-(y-av)**2/2/var)
def B(z,beta,s):
return Deltac(z)+beta*n.sqrt(s)
def Q(m,M0):
r,R0 = m2R(m), m2R(M0)
s,s0 = sig0(r), sig0(R0)
sx = SX(r,R0)
return 1-sx**2/s/s0
def epX(m,M0):
r,R0 = m2R(m), m2R(M0)
s,s0 = sig0(r), sig0(R0)
sx = SX(r,R0)
sg1m = sig1m(r)
sg1mX = sig1mX(r,R0)
return s*sg1m/sx/sg1mX
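# trapezoidal rule written in telescoped form; equivalent to n.trapz(y, x)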
def trapz(x,y):
return (x[-1]*y[-1]-x[0]*y[0]+n.sum(x[1:]*y[:-1]-y[1:]*x[:-1]))/2
def subgrand_trapz_log(b,del0,s,s0,sx,epx,q,meanmu,varmu,varx,gamm,R0,V,z,err=False):
Bb = B(z,b,s)
#print 'gamm,epx,q =',gamm,epx,q
meanx = gamm*((Bb-del0*sx/s0)*(1-epx)/q/n.sqrt(s)+Bb*epx/n.sqrt(s))
fact = V/Vstar(R0)*pG(Bb/n.sqrt(s),meanmu, varmu)
#print b, Bb/n.sqrt(s),meanmu,varmu,pG(Bb/n.sqrt(s),meanmu, varmu)
#print b
lxmin,lxmax = n.log(b*gamm), n.log(80.)
lx = n.linspace(lxmin,lxmax,100)
x = n.exp(lx)
y = (x/gamm-b)*F(x)*pG(x,meanx,varx)*x
factint = trapz(x,y)
#print y
#print factint
#factint = quad(lambda x: (x/gamm-b)*F(x)*pG(x,meanx,varx),b*gamm,100)[0]
#print fact, factint
return fact*factint
def subgrand_trapz(b,del0,s,s0,sx,epx,q,meanmu,varmu,varx,gamm,R0,V,z,err=False):
Bb = B(z,b,s)
#print 'gamm,epx,q =',gamm,epx,q
meanx = gamm*((Bb-del0*sx/s0)*(1-epx)/q/n.sqrt(s)+Bb*epx/n.sqrt(s))
fact = V/Vstar(R0)*pG(Bb/n.sqrt(s),meanmu, varmu)
#print b, Bb/n.sqrt(s),meanmu,varmu,pG(Bb/n.sqrt(s),meanmu, varmu)
#print b
x = n.linspace(b*gamm,100.,100)
y = (x/gamm-b)*F(x)*pG(x,meanx,varx)
factint = trapz(x,y)
#print y
#print factint
#factint = quad(lambda x: (x/gamm-b)*F(x)*pG(x,meanx,varx),b*gamm,100)[0]
#print fact, factint
return fact*factint
def integrand_trapz(del0,m,M0,R0,z): #2s*f_ESP
s = sig0(m2R(m))
V,r,dmdr = pb.volume_radius_dmdr(m,**cosmo)
s,s0,sx = sig0(r), sig0(R0),SX(r,R0)
gamm = gam(r)
epx,q = epX(m,M0), Q(m,M0)
meanmu = del0/n.sqrt(s)*sx/s0
varmu = Q(m,M0)
varx = 1-gamm**2-gamm**2*(1-epx)**2*(1-q)/q
b = n.arange(0.000001,3.,0.03)
y = []
for bx in b:
y.append(prob(bx)*subgrand_trapz(bx,del0,s,s0,sx,epx,q,meanmu,varmu,varx,gamm,R0,V,z)/2/s)
#print 'b,y'
#print bx,y[-1]
if n.isnan(y[-1]):
print 'NAN detected, breaking at: '
print bx,prob(bx),del0,s,s0,sx,epx,q,meanmu,varmu,varx,gamm,R0,V
break
return n.trapz(y,b,dx=0.05)
#return quad(lambda b: prob(b)*subgrand_trapz(b,del0,m,M0,z),0,4.)[0]/2/s
def dsdm(m):
return n.abs(sig0(m2R(m+1))-sig0(m2R(m-1)))/2
# def fcoll(del0,M0,z):
# mm = mmin(z)
# R0 = m2R(M0)
# return quad(lambda m: integrand_trapz(del0,m,M0,R0,z)*dsdm(m),mm,M0)
# def fcoll_trapz(del0,M0,z):
# mm = mmin(z)
# R0 = m2R(M0)
# mx = n.arange(mm,M0,mm)
# y = []
# for m in mx:
# y.append(integrand_trapz(del0,m,M0,R0,z)*dsdm(m))
# print m, y[-1]
# return n.trapz(y,mx,dx=mm)
# #eturn trapz(mx,y)
def fcoll_trapz_log(del0,M0,z):
mm = mmin(z)
R0 = m2R(M0)
lmx = n.linspace(n.log(mm),n.log(M0),100)
y = []
for lm in lmx:
m = n.exp(lm)
y.append(integrand_trapz(del0,m,M0,R0,z)*dsdm(m)*m)
#print m, y[-1]
return trapz(lmx,y),n.exp(lmx),y
def m2S(m):
return sig0(m2R(m))
| mit |
shishaochen/TensorFlow-0.8-Win | tensorflow/examples/skflow/out_of_core_data_classification.py | 3 | 2196 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets, metrics, cross_validation
import pandas as pd
import dask.dataframe as dd
from tensorflow.contrib import skflow
# Sometimes, when your dataset is too large to hold in memory,
# you may want to load it into an out-of-core dataframe as provided by the dask
# library, first drawing sample batches and then loading them into memory for training.
# Load dataset.
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target,
test_size=0.2, random_state=42)
# Note that we use iris here just for demo purposes
# You can load your own large dataset into a out-of-core dataframe
# using dask's methods, e.g. read_csv() in dask
# details please see: http://dask.pydata.org/en/latest/dataframe.html
# We firstly load them into pandas dataframe and then convert into dask dataframe
X_train, y_train, X_test, y_test = [pd.DataFrame(data) for data in [X_train, y_train, X_test, y_test]]
X_train, y_train, X_test, y_test = [dd.from_pandas(data, npartitions=2) for data in [X_train, y_train, X_test, y_test]]
# Initialize a TensorFlow linear classifier
classifier = skflow.TensorFlowLinearClassifier(n_classes=3)
# Fit the model using training set
classifier.fit(X_train, y_train)
# Make predictions on each partitions of testing data
predictions = X_test.map_partitions(classifier.predict).compute()
# Calculate accuracy
score = metrics.accuracy_score(y_test.compute(), predictions)
| apache-2.0 |
johnnycakes79/pyops | pyops/plots.py | 1 | 15870 | import pandas as pd
from bokeh.plotting import figure, show, output_notebook, gridplot
from bokeh.palettes import brewer
from collections import OrderedDict
from bokeh.models import HoverTool
import numpy as np
from bokeh.models import ColumnDataSource, Range1d, FactorRange
from datetime import datetime
# BREWER_PLOT
def brewer_plot(data, instruments_all, instruments=None):
"""
    This function shows two bokeh brewer plots in the IPython notebook using
the data given as a parameter. In the second one only the instruments
given in the third parameter are plotted. In the first one all of them
are plotted.
:param data: power_avg table
:type data: pandas DataFrame
:param instruments_all: All the instruments in the power_avg file
:type instruments_all: List of strings
:param instruments: Instruments to be plotted in the second plot
:type instruments: List of strings
:returns: Nothing
"""
    # Hiding annoying warnings on the top of the plot
output_notebook(hide_banner=True)
# Creating both figures
big_figure = create_plot(data, instruments_all)
small_figure = create_plot(data, instruments, big_figure.x_range)
# Plotting them together
p = gridplot([[big_figure], [small_figure]])
show(p)
def create_plot(data, instruments, x_range=None):
"""
This function creates a plot given a power_avg table and the instruments
to be plotted. Optionally an x_range to be linked to another plot can be
passed as a parameter.
    :param data: power_avg table
:type data: pandas DataFrame
:param instruments: Instruments to be plotted
:type instruments: List of strings
:param x_range: x_range to be linked with
:type x_range: figure x_range
:returns: bokeh figure
"""
# Create a set of tools to use
tools = "resize,hover,save,pan,box_zoom,wheel_zoom,reset"
# Creating the areas to be plotted
areas = stacked(data, instruments)
# Selecting the colors for the calculated areas
colors = palette(len(areas))
    # Building the x coordinates (forward and reversed index values) for the patches
x2 = np.hstack((data.index.values[::-1], data.index.values))
# Creating the figure
if x_range is None:
f = figure(x_axis_label=data.index.name, y_axis_label='Watts',
x_axis_type="datetime", tools=tools, logo=None,
x_range=Range1d(min(data.index.values),
max(data.index.values)))
else:
f = figure(x_axis_label=data.index.name, y_axis_label='Watts',
x_axis_type="datetime", x_range=x_range, tools=tools,
logo=None)
for pos in range(len(colors)):
f.patch(x2, list(areas.values())[pos], color=colors[pos],
legend=instruments[pos], line_color=None, alpha=0.8)
# Setting the color of the line of the background
f.grid.minor_grid_line_color = '#eeeeee'
return f
def palette(number):
"""
This function returns a palette of hex colors of size number.
:param number: Amount of different colors needed
:type number: integer
:returns: list of strings
"""
if number > 40:
print ("Ooops, too many parameters, not enough colors...")
# Selecting the colors from different bokeh palettes
palette = brewer["Spectral"][11]
palette += list(reversed(brewer["RdBu"][11]))
palette += brewer["YlGnBu"][9]
palette += list(reversed(brewer["YlGn"][9]))
palette += brewer["PiYG"][11]
return palette[:number]
def stacked(df, categories):
"""
This function stacks all the power information for each instrument.
:param df: power_avg pandas DataFrame
:type df: pandas DataFrame
:param categories: categories in which the plot is going to be divided
:type categories: list of values
    :returns: OrderedDict of numpy arrays
"""
areas = OrderedDict()
last = np.zeros(len(df[categories[0]]))
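    # each patch is the region between the previous cumulative total (walked
    # backwards) and the new cumulative total (walked forwards)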
for cat in categories:
next = last + df[cat]
areas[cat] = np.hstack((last[::-1], next))
last = next
return areas
# MODES_SCHEDULE
def modes_schedule(data):
    # Hiding annoying warnings on the top of the plot
output_notebook(hide_banner=True)
show(get_modes_schedule(data))
def get_modes_schedule(data, x_range=None):
"""
    This function creates a time line plot based on the data from modes or
module_states files.
:param data: module_states or modes table
:type data: pandas DataFrame
:returns: bokeh figure
"""
# Adding a new column to see which instruments are changing in each entry
data = add_difference_column(data)
# Building a new table to make the data plottable by bokeh
start_end_table = build_start_end_table(data)
source = ColumnDataSource(start_end_table)
# Selecting the instruments detected in the data
instruments = [column for column in data if column.upper() == column]
instruments.sort(reverse=True)
# Creating the figure
if x_range is None:
p = figure(x_axis_type="datetime", logo=None,
x_range=Range1d(min(start_end_table['Start_time']),
max(start_end_table['End_time'])),
y_range=FactorRange(factors=instruments),
tools="resize,hover,save,pan,box_zoom,wheel_zoom,reset")
else:
p = figure(x_axis_type="datetime", logo=None,
x_range=x_range,
y_range=FactorRange(factors=instruments),
tools="resize,hover,save,pan,box_zoom,wheel_zoom,reset")
p.quad(left='Start_time', right='End_time', top='Instrument_top',
bottom='Instrument_bottom', color='Color', source=source)
# Adding the hover tool to show info when hovering over the plot
hover = p.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
('Mode', '@Mode'),
('Time', '@Time')
])
return p
def add_difference_column(data):
"""
This function returns the same pandas DataFrame that it receives as a
parameter, but with a new column that lists which instruments have changed
since the last recorded state in the table.
:param data: module_states or modes table
:type data: pandas DataFrame
:returns: pandas DataFrame
"""
# We create a list of lists, which will be the new column to add to the data
difference = [[]]
# We take the first row of the table to have the starting values
data_aux = data.transpose()
prev_row = data_aux[data_aux.columns.values[0]]
difference[0] = [element for element in prev_row.index
if prev_row[element] is not None]
# For each entry in the table we detect which instruments are changing
# since the previous row
pos = 0
for row in data_aux:
for element in data_aux.index:
if not prev_row[element] == data_aux[row][element]:
if not len(difference) == pos + 1:
difference.append([element])
else:
difference[pos].append(element)
if not len(difference) == pos + 1:
difference.append([])
prev_row = data_aux[row]
pos += 1
# Finally we add the calculated column
data["Change"] = difference
return data
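# Worked sketch (illustrative only; the table layout is an assumption about
# the module_states format, with one column per instrument and one row per
# recorded state):
#
#   states = pd.DataFrame({"CAM": ["OFF", "ON", "ON"],
#                          "RAD": ["OFF", "OFF", "SCI"]})
#   add_difference_column(states)["Change"]
#   # -> [["CAM", "RAD"], ["CAM"], ["RAD"]]
#
# i.e. every instrument is listed for the first row, and afterwards only
# the instruments whose value differs from the previous row.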
def build_start_end_table(data):
"""
This function returns a pandas DataFrame which will be used to build a Bokeh
plot directly from it. The DataFrame is created from the data received as
a parameter.
:param data: module_states or modes table
:type data: pandas DataFrame
:returns: pandas DataFrame
"""
# Creating the DataFrame manually
di = {"End_time": [], "Instrument": [],
"Mode": [], "Start_time": [], "Time": []}
# Filling the new DataFrame with the instrument, mode and start time
data_aux = data.transpose()
for row in data_aux:
row_t = data_aux[row].transpose()
for instrument in row_t["Change"]:
di["End_time"].append(None)
di["Instrument"].append(instrument)
di["Mode"].append(row_t[instrument])
di["Start_time"].append(row)
di["Time"] = [str(x) for x in di["Start_time"]]
df = pd.DataFrame(di)
df = df.sort(["Start_time"], ascending=True)
instruments = [column for column in data if column.upper() == column]
# Calculating and adding the end time for each task
for ins in instruments:
shift = df.loc[df["Instrument"] == ins].shift(-1)
if len(shift) > 1:
for i in range(len(shift.index.values)):
di["End_time"][shift.index.values[i]] = \
shift["Start_time"][shift.index.values[i]]
df = pd.DataFrame(di)
# Calculating and adding the end time for tasks with an unspecified end time
for pos in range(len(df["End_time"])):
if not type(df["End_time"][pos]) is pd.tslib.Timestamp:
df.loc[pos, "End_time"] = df["Start_time"].max()
# Deleting OFF states, we don't want to plot them
df = df[df.Mode != "OFF"]
df[["End_time", "Start_time"]] = \
df[["End_time", "Start_time"]].astype(datetime)
# Creating new columns needed for making the bars wider in the plot
df["Instrument_bottom"] = [row + ":0.25" if " " in row else row + ":0.1"
for row in df["Instrument"].values]
df["Instrument_top"] = [row + ":0.75" if " " in row else row + ":0.9" for
row in df["Instrument"].values]
# Setting different colors for each different mode in the DataFrame
modes = df["Mode"].unique()
colors = dict(zip(modes, palette(len(modes))))
df["Color"] = [colors[row] for row in df["Mode"].values]
return df
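# Note on the Instrument_bottom / Instrument_top columns added above
# (descriptive comment, not original code): Bokeh accepts a categorical
# coordinate with a fractional offset written as "<category>:<fraction>",
# so "CAM:0.1" / "CAM:0.9" make each quad span most of its category band
# on the FactorRange, which is how the bars get their thickness.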
# DATA_PLOT
def data_plot(data, instruments):
"""
This function shows a data plot in the ipython notebook using the given
data for the given instruments.
:param data: data_rate pandas DataFrame
:type data: pandas DataFrame
:param instruments: list of the instruments to plot
:type instruments: list of strings
:returns: nothing
"""
# Hiding annoying warnings at the top of the plot
output_notebook(hide_banner=True)
show(get_data_plot(data, instruments))
def get_data_plot(data, instruments, x_range=None):
"""
This function returns a data rate plot bokeh figure using the given
data for the given instruments.
:param data: data_rate pandas DataFrame
:type data: pandas DataFrame
:param instruments: list of the instruments to plot
:type instruments: list of strings
:param x_range: x_range from another figure to link with
:type x_range: x_range bokeh format
:returns: bokeh figure
"""
# Creating the figure, depending on whether we want to link it to another figure
if x_range is None:
r = figure(x_axis_type="datetime", logo=None,
x_range=Range1d(min(data.index.values),
max(data.index.values)),
tools="resize,hover,save,pan,box_zoom,wheel_zoom,reset")
else:
r = figure(x_axis_type="datetime", x_range=x_range, logo=None,
tools="resize,hover,save,pan,box_zoom,wheel_zoom,reset")
# Getting the appropriate list of colors
colors = palette(len(instruments))
i = 0
# Transforming the multiindex dataframe into a normal one to use hover tool
d = transform_multiindex_df(data, instruments)
# Inserting the lines in the plot
for ins in instruments:
r.line(d['index'], d[ins[0] + "_" + ins[1]], color=colors[i],
legend=ins[0] + " - " + ins[1], line_width=3)
# I don't know why, but if this source is not rebuilt every single
# time, it doesn't plot correctly
source = ColumnDataSource(d)
# WARNING: THIS IS A HACK
# Hover tool doesn't work over lines, so I have created points
r.scatter(d['index'], d[ins[0] + "_" + ins[1]], color=colors[i],
source=source, fill_color=None, size=8)
i += 1
r.title = "Data Rate"
r.grid.grid_line_alpha = 0.3
# Adding the hover tool to show info when hovering over the plot
hover = r.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([("Time", "@Time")] +
[(ins[0] + " - " + ins[1], "@" + ins[0] + "_"
+ ins[1]) for ins in instruments])
return r
def transform_multiindex_df(data, instruments):
"""
This function returns a pandas DataFrame without a multiindex and prepared
to be plotted and used by the hover tool when converted to the proper
format.
:param data: data_rate pandas DataFrame
:type data: pandas DataFrame
:param instruments: list of the instruments to plot
:type instruments: list of strings
:returns: pandas DataFrame
"""
d = {}
d['Time'] = [str(x) for x in pd.to_datetime(data.index.values)]
d['index'] = data.index.values
for ins in instruments:
d[ins[0] + "_" + ins[1]] = \
[x[0] for x in data[ins[0], ins[1]].values.tolist()]
df = pd.DataFrame(d)
return df
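# Note (descriptive comment, not original code): the HoverTool can only read
# columns from a flat ColumnDataSource, so the MultiIndex columns are
# flattened here into "<level0>_<level1>" names; get_data_plot then refers
# to them as "@<level0>_<level1>" in its tooltips.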
# POWER_PLOT
def power_plot(data, instruments):
"""
This function shows a power plot in the ipython notebook using the given
data for the given instruments.
:param data: power usage pandas DataFrame
:type data: pandas DataFrame
:param instruments: list of the instruments to plot
:type instruments: list of strings
:returns: nothing
"""
# Hiding annoying warnings at the top of the plot
output_notebook(hide_banner=True)
show(get_power_plot(data, instruments))
def get_power_plot(data, instruments, x_range=None):
"""
This function returns a power plot bokeh figure using the given
data for the given instruments.
:param data: power usage pandas DataFrame
:type data: pandas DataFrame
:param instruments: list of the instruments to plot
:type instruments: list of strings
:param x_range: x_range from another figure to link with
:type x_range: x_range bokeh format
:returns: bokeh figure
"""
# Creating the figure, depending on whether we want to link it to another figure
if x_range is None:
r = figure(x_axis_type="datetime", logo=None,
x_range=Range1d(min(data.index.values),
max(data.index.values)),
tools="resize,hover,save,pan,box_zoom,wheel_zoom,reset")
else:
r = figure(x_axis_type="datetime", x_range=x_range, logo=None,
tools="resize,hover,save,pan,box_zoom,wheel_zoom,reset")
# Getting the appropriate list of colors
colors = palette(len(instruments))
i = 0
# Preparing a set of data to convert into a source for the hover tool
d = data.copy(deep=True)
d['Time'] = [str(x) for x in pd.to_datetime(data.index.values)]
# Inserting the lines in the plot
for ins in instruments:
r.line(data.index.values, data[ins], color=colors[i],
legend=ins, line_width=3)
# I don't know why, but if this source is not rebuilt every single
# time, it doesn't plot correctly
source = ColumnDataSource(d)
# WARNING: THIS IS A HACK
# Hover tool doesn't work over lines, so I have created points
r.scatter(data.index.values, data[ins], color=colors[i], source=source,
fill_color=None, size=8)
i += 1
r.title = "Power"
r.grid.grid_line_alpha = 0.3
# Adding the hover tool to show info when hovering over the plot
hover = r.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([("Time", "@Time")] +
[(ins, "@" + ins) for ins in instruments])
return r
| bsd-3-clause |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/cross_validation.py | 21 | 73625 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .gaussian_process.kernels import Kernel as GPKernel
from .exceptions import FitFailedWarning
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. Also note that the interface of the "
"new CV iterators are different from that of this module. "
"This module will be removed in 0.20.", DeprecationWarning)
__all__ = ['KFold',
'LabelKFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'LabelShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
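# Minimal sketch (illustrative only, not part of scikit-learn): a subclass of
# _PartitionIterator only needs to yield test indices (or masks); the base
# class turns each of them into a (train_index, test_index) pair.
#
#   class _FirstKOut(_PartitionIterator):
#       """Toy CV scheme: a single split with the first k samples as test."""
#       def __init__(self, n, k):
#           super(_FirstKOut, self).__init__(n)
#           self.k = k
#       def _iter_test_indices(self):
#           yield np.arange(self.k)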
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.LeaveOneOut` instead.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.LeavePOut` instead.
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorially with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.KFold` instead.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used as a validation set once while the k - 1 remaining
fold(s) form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : int, RandomState instance or None, optional, default=None
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``shuffle`` == True.
Examples
--------
>>> from sklearn.cross_validation import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
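# Worked sketch (illustrative only): for n=10 and n_folds=3 the code above
# gives fold_sizes = [4, 3, 3] -- (10 // 3) for every fold, plus one extra
# sample for each of the first 10 % 3 = 1 folds -- so the test folds are
# idxs[0:4], idxs[4:7] and idxs[7:10].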
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LabelKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping labels.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.GroupKFold` instead.
The same label will not appear in two different folds (the number of
distinct labels has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct labels is approximately the same in each fold.
.. versionadded:: 0.17
Parameters
----------
labels : array-like with shape (n_samples, )
Contains a label for each sample.
The folds are built so that the same label does not appear in two
different folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.cross_validation import LabelKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> labels = np.array([0, 0, 2, 2])
>>> label_kfold = LabelKFold(labels, n_folds=2)
>>> len(label_kfold)
2
>>> print(label_kfold)
sklearn.cross_validation.LabelKFold(n_labels=4, n_folds=2)
>>> for train_index, test_index in label_kfold:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def __init__(self, labels, n_folds=3):
super(LabelKFold, self).__init__(len(labels), n_folds,
shuffle=False, random_state=None)
unique_labels, labels = np.unique(labels, return_inverse=True)
n_labels = len(unique_labels)
if n_folds > n_labels:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of labels: {1}.").format(n_folds,
n_labels))
# Weight labels by their number of occurrences
n_samples_per_label = np.bincount(labels)
# Distribute the most frequent labels first
indices = np.argsort(n_samples_per_label)[::-1]
n_samples_per_label = n_samples_per_label[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(n_folds)
# Mapping from label index to fold index
label_to_fold = np.zeros(len(unique_labels))
# Distribute samples by adding the largest weight to the lightest fold
for label_index, weight in enumerate(n_samples_per_label):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
label_to_fold[indices[label_index]] = lightest_fold
self.idxs = label_to_fold[labels]
def _iter_test_indices(self):
for f in range(self.n_folds):
yield np.where(self.idxs == f)[0]
def __repr__(self):
return '{0}.{1}(n_labels={2}, n_folds={3})'.format(
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.StratifiedKFold` instead.
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : int, RandomState instance or None, optional, default=None
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``shuffle`` == True.
Examples
--------
>>> from sklearn.cross_validation import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size trunc(n_samples / n_folds); the last one has the
complement.
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = np.bincount(y_inversed)
min_labels = np.min(label_counts)
if np.all(self.n_folds > label_counts):
raise ValueError("All the n_labels for individual classes"
" are less than %d folds."
% (self.n_folds))
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.LeaveOneGroupOut` instead.
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.LeavePGroupsOut` instead.
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.ShuffleSplit` instead.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, optional (default None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
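# Worked sketch (illustrative only): for n=10, test_size=0.25 and
# train_size=None the function returns n_test = ceil(0.25 * 10) = 3 and
# n_train = 10 - 3 = 7; an integer test_size would instead be taken as an
# absolute number of test samples.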
def _approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
It is the most likely outcome of drawing n_draws many
samples from the population given by class_counts.
Parameters
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
"""
# this computes a bad approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = n_draws * class_counts / class_counts.sum()
# floored means we don't overshoot n_samples, but probably undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_samples
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
inds, = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = rng.choice(inds, size=add_now, replace=False)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(np.int)
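# Worked sketch (illustrative only): with class_counts = [5, 3, 2] and
# n_draws = 4, continuous = [2.0, 1.2, 0.8] and floored = [2, 1, 0]; one draw
# is still missing, and it goes to the class with the largest remainder
# (0.8, the third class), so the function returns [2, 1, 1].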
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.StratifiedShuffleSplit` instead.
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, optional (default None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(np.bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = np.bincount(self.y_indices)
for n in range(self.n_iter):
# if there are ties in the class-counts, we want
# to make sure to break them anew in each iteration
n_i = _approximate_mode(cls_count, self.n_train, rng)
class_counts_remaining = cls_count - n_i
t_i = _approximate_mode(class_counts_remaining, self.n_test, rng)
train = []
test = []
for i, _ in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
perm_indices_class_i = np.where(
(i == self.y_indices))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.PredefinedSplit` instead.
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
class LabelShuffleSplit(ShuffleSplit):
"""Shuffle-Labels-Out cross-validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.GroupShuffleSplit` instead.
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LabelShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique labels,
whereas LabelShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(labels, p=10)`` would be
``LabelShuffleSplit(labels, test_size=10, n_iter=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
not to samples, as in ShuffleSplit.
.. versionadded:: 0.17
Parameters
----------
labels : array, [n_samples]
Labels of samples
n_iter : int (default 5)
Number of re-shuffling and splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, optional (default None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
"""
def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None,
random_state=None):
classes, label_indices = np.unique(labels, return_inverse=True)
super(LabelShuffleSplit, self).__init__(
len(classes),
n_iter=n_iter,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self.labels = labels
self.classes = classes
self.label_indices = label_indices
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.labels,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _iter_indices(self):
for label_train, label_test in super(LabelShuffleSplit,
self)._iter_indices():
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(self.label_indices, label_train))
test = np.flatnonzero(np.in1d(self.label_indices, label_test))
yield train, test
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.cross_val_predict` instead.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.cross_validation import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y)
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
preds = [p for p, _ in preds_blocks]
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_locs = np.empty(len(locs), dtype=int)
inv_locs[locs] = np.arange(len(locs))
# Check for sparse predictions
if sp.issparse(preds[0]):
preds = sp.vstack(preds, format=preds[0].format)
else:
preds = np.concatenate(preds)
return preds[inv_locs]
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.cross_val_score` instead.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.cross_validation import cross_val_score
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS
[ 0.33150734 0.08022311 0.03531764]
See Also
---------
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
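# A minimal sketch (hypothetical variables) of how a caller would unpack the
# list built above; the exact layout depends on the flags passed:
#
#   out = _fit_and_score(clone(estimator), X, y, scorer, train, test,
#                        verbose=0, parameters=None, fit_params=None,
#                        return_train_score=True, return_parameters=True)
#   train_score, test_score, n_test, duration, params = out
#
# With the default flags only [test_score, n_test_samples, scoring_time] is
# returned, which is why cross_val_score above keeps column 0 of the result.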
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel) \
and not isinstance(estimator.kernel, GPKernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
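# Minimal sketch (toy shapes) of the kernel slicing done above for pairwise
# estimators: test rows are indexed against the *training* columns, so the
# test block is rectangular while the training block stays square.
#
#   K = np.random.rand(5, 5)
#   K = K.dot(K.T)                       # toy precomputed kernel matrix
#   train, test = np.array([0, 1, 2]), np.array([3, 4])
#   K_train = K[np.ix_(train, train)]    # shape (3, 3)
#   K_test = K[np.ix_(test, train)]      # shape (2, 3)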
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
estimator.fit(X_train, y_train)
avg_score.append(scorer(estimator, X_test, y_test))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return safe_indexing(y, ind)
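# Hedged illustration (toy values) of the label-constrained shuffle: indices
# are permuted only inside each label group, so every shuffled value stays in
# the group it came from.
#
#   rng = np.random.RandomState(0)
#   y = np.array([10, 11, 12, 20, 21, 22])
#   labels = np.array([0, 0, 0, 1, 1, 1])
#   y_shuffled = _shuffle(y, labels, rng)
#   # first three entries are a permutation of [10, 11, 12], last three of
#   # [20, 21, 22]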
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.check_cv` instead.
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is binary or
multiclass, :class:`StratifiedKFold` is used. In all other cases,
:class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
    classifier : boolean, optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
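# Sketch of how the inputs are resolved above (deprecated cross_validation
# API, shown for illustration only; `my_cv` is a hypothetical generator of
# (train, test) index arrays):
#
#   check_cv(3, X, y, classifier=True)      # -> StratifiedKFold(y, 3) when y
#                                           #    is binary or multiclass
#   check_cv(None, X, y, classifier=False)  # -> KFold(len(X), 3)
#   check_cv(my_cv, X, y)                   # custom objects pass through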
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.permutation_test_score` instead.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
        the same label.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The p-value, which approximates the probability that the score would
be obtained by chance. This is calculated as:
        `(C + 1) / (n_permutations + 1)`, where C is the number of
        permutations whose score >= the true score.
The best possible p-value is 1/(n_permutations + 1), the worst is 1.0.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
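# Hedged usage sketch of the function above (deprecated module path; values
# chosen only for illustration):
#
#   from sklearn import datasets
#   from sklearn.svm import SVC
#   iris = datasets.load_iris()
#   score, perm_scores, pvalue = permutation_test_score(
#       SVC(kernel='linear'), iris.data, iris.target,
#       cv=5, n_permutations=100, n_jobs=1)
#   # a p-value close to 1 / (n_permutations + 1) suggests the score is
#   # unlikely to have been obtained by chance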
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.train_test_split` instead.
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
.. versionadded:: 0.17
*stratify* splitting
Returns
-------
splitting : list, length = 2 * len(arrays),
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
| mit |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/doc/mpl_examples/pylab_examples/barchart_demo2.py | 6 | 4076 | """
Thanks Josh Hemann for the example
This example comes from an application in which grade school gym
teachers wanted to be able to show parents how their child did across
a handful of fitness tests, and importantly, relative to how other
children did. To extract the plotting code for demo purposes, we'll
just make up some data for little Johnny Doe...
"""
import numpy as np
import matplotlib.pyplot as plt
import pylab
from matplotlib.patches import Polygon
from matplotlib.ticker import MaxNLocator
student = 'Johnny Doe'
grade = 2
gender = 'boy'
cohortSize = 62 #The number of other 2nd grade boys
numTests = 5
testNames = ['Pacer Test', 'Flexed Arm\n Hang', 'Mile Run', 'Agility',
'Push Ups']
testMeta = ['laps', 'sec', 'min:sec', 'sec', '']
scores = ['7', '48', '12:52', '17', '14']
rankings = np.round(np.random.uniform(0, 1, numTests)*100, 0)
fig = plt.figure(figsize=(9,7))
ax1 = fig.add_subplot(111)
plt.subplots_adjust(left=0.115, right=0.88)
fig.canvas.set_window_title('Eldorado K-8 Fitness Chart')
pos = np.arange(numTests)+0.5 #Center bars on the Y-axis ticks
rects = ax1.barh(pos, rankings, align='center', height=0.5, color='m')
ax1.axis([0,100,0,5])
pylab.yticks(pos, testNames)
ax1.set_title('Johnny Doe')
plt.text(50, -0.5, 'Cohort Size: ' + str(cohortSize),
horizontalalignment='center', size='small')
# Set the right-hand Y-axis ticks and labels and set X-axis tick marks at the
# deciles
ax2 = ax1.twinx()
ax2.plot([100,100], [0, 5], 'white', alpha=0.1)
ax2.xaxis.set_major_locator(MaxNLocator(11))
xticks = pylab.setp(ax2, xticklabels=['0','10','20','30','40','50','60',
'70',
'80','90','100'])
ax2.xaxis.grid(True, linestyle='--', which='major', color='grey',
alpha=0.25)
#Plot a solid vertical gridline to highlight the median position
plt.plot([50,50], [0, 5], 'grey', alpha=0.25)
# Build up the score labels for the right Y-axis by first appending a carriage
# return to each string and then tacking on the appropriate meta information
# (i.e., 'laps' vs 'seconds'). We want the labels centered on the ticks, so if
# there is no meta info (like for pushups) then don't add the carriage return to
# the string
def withnew(i, scr):
if testMeta[i] != '' : return '%s\n'%scr
else: return scr
scoreLabels = [withnew(i, scr) for i,scr in enumerate(scores)]
scoreLabels = [i+j for i,j in zip(scoreLabels, testMeta)]
pylab.yticks(pos, scoreLabels)
ax2.set_ylabel('Test Scores')
#Make list of numerical suffixes corresponding to position in a list
# 0 1 2 3 4 5 6 7 8 9
suffixes =['th', 'st', 'nd', 'rd', 'th', 'th', 'th', 'th', 'th', 'th']
ax2.set_xlabel('Percentile Ranking Across ' + str(grade) + suffixes[grade] \
+ ' Grade ' + gender.title() + 's')
# Lastly, write in the ranking inside each bar to aid in interpretation
for rect in rects:
# Rectangle widths are already integer-valued but are floating
# type, so it helps to remove the trailing decimal point and 0 by
# converting width to int type
width = int(rect.get_width())
    # Figure out what the last digit is (width modulo 10) so we can add
# the appropriate numerical suffix (e.g. 1st, 2nd, 3rd, etc)
lastDigit = width % 10
# Note that 11, 12, and 13 are special cases
if (width == 11) or (width == 12) or (width == 13):
suffix = 'th'
else:
suffix = suffixes[lastDigit]
rankStr = str(width) + suffix
if (width < 5): # The bars aren't wide enough to print the ranking inside
xloc = width + 1 # Shift the text to the right side of the right edge
clr = 'black' # Black against white background
align = 'left'
else:
xloc = 0.98*width # Shift the text to the left side of the right edge
clr = 'white' # White on magenta
align = 'right'
yloc = rect.get_y()+rect.get_height()/2.0 #Center the text vertically in the bar
ax1.text(xloc, yloc, rankStr, horizontalalignment=align,
verticalalignment='center', color=clr, weight='bold')
plt.show()
| mit |
ZLake/Deep_Learning_on_Tensorflow | RL Learning/CATCH_Keras_RL/test.py | 1 | 1347 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 3 15:25:10 2017
@author: lakezhang
"""
import json
import matplotlib.pyplot as plt
import numpy as np
from keras.models import model_from_json
from qlearn import Catch
if __name__ == "__main__":
# Make sure this grid size matches the value used for training
grid_size = 10
with open("model.json", "r") as jfile:
model = model_from_json(json.load(jfile))
model.load_weights("model.h5")
model.compile("sgd", "mse")
# Define environment, game
env = Catch(grid_size)
c = 0
for e in range(10):
loss = 0.
env.reset()
game_over = False
# get initial input
input_t = env.observe()
plt.imshow(input_t.reshape((grid_size,)*2),
interpolation='none', cmap='gray')
plt.savefig("%03d.png" % c)
c += 1
while not game_over:
input_tm1 = input_t
# get next action
q = model.predict(input_tm1)
action = np.argmax(q[0])
# apply action, get rewards and new state
input_t, reward, game_over = env.act(action)
plt.imshow(input_t.reshape((grid_size,)*2),
interpolation='none', cmap='gray')
plt.savefig("%03d.png" % c)
            c += 1
| mit |
voxlol/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a Python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: since they are based only
on cluster assignments rather than distances, they are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
anapophenic/knb | knbTest.py | 1 | 5911 | ###############################################################################
# knbTest.py
#
# Example usage of implementation of kernel multi-view spectral algorithm of
# Song et al. (2014)
#
# Author: E.D. Gutierrez ([email protected])
# Created: 24 March 2016
# Last modified: 29 March 2016
#
###############################################################################
import kernelNaiveBayes as knb
import numpy as np
import scipy
import scipy.spatial.distance
import matplotlib.pyplot as plt
import scipy.stats
def initGaussMM(k,m, means=None,variances=None):
"""
    Create synthetic data drawn from a Gaussian mixture model.
Inputs:
k: number of hidden states (mixture components)
m: number of samples per view
Optional:
means: array of k means of mixture components
    variances: array of k variances of mixture components
Return:
    X: array of shape (m x 3) with m observations for each of 3 views
    H: array of shape (m x 1) listing the mixture component that generated
      each observation
means: array of k means of mixture components
    variances: array of k variances of mixture components
"""
X = np.zeros((m,3))
H = np.zeros(m,int)
if (means is None) or (variances is None):
means = []
variances = []
for kk in range(k):
means.append(float(kk))
variances.append(0.1*(kk+1))
for mm in range(m):
H[mm] = int(np.random.randint(0,k))
for j in range(3):
X[mm,j] = np.random.normal(means[H[mm]],variances[H[mm]])
return X,H,means,variances
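# Hedged example of generating synthetic data with the helper above (values
# chosen only for illustration):
#
#   X, H, means, variances = initGaussMM(k=2, m=500,
#                                        means=[1.5, -1.0],
#                                        variances=[0.4, 0.4])
#   # X has shape (500, 3): one observation per view; H holds the index of
#   # the true component used to generate each row.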
def plot2(pXbarH,xRange,savepath='pdf.png'):
# plot results
fig = plt.figure(figsize=(8,4))
ax = fig.add_subplot(1,2,1); x = xRange; y = np.array(pXbarH[:,0].T).flatten()
ax.scatter(x,y); ax.set_title('pdf of component h=0'); ax.set_xlabel("x")
ax.set_ylabel("pdf"); ax = fig.add_subplot(1,2,2); x = xRange; y = np.array(pXbarH[:,1].T).flatten()
ax.scatter(x,y)
ax.set_title('pdf of component h=1'); ax.set_xlabel("x"); ax.set_ylabel("pdf")
fig.savefig(savepath)
def plot3(pXbarH,xRange,truepdf,savepath='pdf.png'):
# plot results
fig = plt.figure(figsize=(8,4))
ax = fig.add_subplot(1,2,1); x = xRange; y = np.array(pXbarH[:,0].T).flatten()
ax.plot(x,y);
y = np.array(truepdf[:,0].T).flatten(); ax.plot(x,y)
ax.set_title('pdf of component h=0'); ax.set_xlabel("x")
ax.set_ylabel("pdf"); ax = fig.add_subplot(1,2,2); x = xRange; y = np.array(pXbarH[:,1].T).flatten()
ax.plot(x,y)
y = np.array(truepdf[:,1].T).flatten(); ax.plot(x,y)
ax.set_title('pdf of component h=1'); ax.set_xlabel("x"); ax.set_ylabel("pdf")
fig.savefig(savepath)
def testCode():
# Example test code
k=2; #number of latent states
pXbarH = {}
randinits = 100 #number of outer loop iterations (restarts) to run
for i in range(randinits):
X,H,means,variances = initGaussMM(k,1000,means=[1.5,-1],variances=[0.4,0.4])
xRange = np.matrix(np.linspace(min(means)-2*max(variances)**0.5,
max(means)+2*max(variances)**0.5,500)).T #create 500 equally spaced samples for visualizing p(x|h)
pXbarH[i] = knb.kernXMM(X,k,xRange,var=.01) #compute p(x|h) estimate
sumprob = np.matrix(np.zeros(np.shape(pXbarH[0])))
for i in pXbarH.keys():
map0 = xRange[np.argmax(pXbarH[i][:,0])]
if np.abs(map0-means[0])<np.abs(map0-means[1]):
sumprob +=pXbarH[i]
else:
for j in range(pXbarH[i].shape[0]):
sumprob[j,0] += max([0,np.matrix(pXbarH[i][j,1])])
sumprob[j,1] += max([0,np.matrix(pXbarH[i][j,0])])
#scale the pdfs to make them comparable
truepdf = np.zeros(pXbarH[0].shape)
for kk in range(k):
for i,item in enumerate(xRange):
truepdf[i,kk] = scipy.stats.norm.pdf(item, means[kk],variances[kk])
for kk in range(k):
o = truepdf[:,kk].sum()
p = sumprob[:,kk].sum()
for i in range(len(xRange)):
truepdf[i,kk] = truepdf[i,kk]/o
sumprob[i,kk] = sumprob[i,kk]/p
plot3(sumprob,xRange,truepdf,savepath='pdfcompare.png')
plot2(sumprob,xRange,savepath='pdf.png')
#maximum a posteriori estimate of component 0:
map0 = xRange[np.argmax(pXbarH[:,0])]
#maximum a posterior estimate of component 1:
map1 = xRange[np.argmax(pXbarH[:,1])]
print 'Maximum a posteriori means:\t'+str(map0)+'\t'+str(map1)
########################################################
########################################################
import kernelNaiveBayes as knb
import data_import as d_i
import moments_cons as m_c
basepath = 'C:/Users/e4gutier/Downloads/mukamel/cndd/emukamel/HMM/Data/Binned/'
l=5000
N,X,a = d_i.data_prep(basepath+'allc_AM_E1_chrY_binsize100.mat','kern',l)
kernel = 'beta_shifted'
N,X_importance_weighted,a = d_i.data_prep(basepath+'allc_AM_E1_chrY_binsize100.mat','explicit',l)
#noise = np.random.rand(Data.shape[0],Data.shape[1])/1e3 #jiggle the data points a little bit to improve conditioning
m = 3; n=5 #m is number of hidden states; n is number of bins in kernel
xRange = np.arange(0,X.max(),int(X.max()/n))
O_h,T_h = knb.kernXMM(X,m,xRange,var=[N,n],kernel='beta_shifted')
O_h,T_h = m_c.col_normalize(O_h),m_c.col_normalize(T_h)
##
kernel = 'beta_shifted'; var = [N,n];k=m; queryX = xRange
(K,L,G) = knb.computeKerns3(X,kernel,False,var)
(A,pi) = knb.kernSpecAsymm(K,L,G,k,view=2)
(A3,_) = knb.kernSpecAsymm(K,L,G,k,view=3)
O_h = knb.crossComputeKerns(queryX,X[:,2].T,kernel,False,var)*A
OT_h = knb.crossComputeKerns(queryX,X[:,2].T,kernel,False,var)*A3
T_h = np.linalg.pinv(O_h).dot(OT_h)
##
phi = m_c.phi_beta_shifted;
P_21, P_31, P_23, P_13, P_123 = m_c.moments_cons_importance_weighted(X_importance_weighted, phi, N, n);
C_h, T_h = m_c.estimate(P_21, P_31, P_23, P_13, P_123, m)
C_h_p, T_h_p = m_c.estimate_refine(C_h, P_21, phi, N, n, m, a)
| cc0-1.0 |
rodluger/pysyzygy | examples/feature_test.py | 1 | 10010 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
:py:mod:`feature_test.py` - Feature testing
-------------------------------------------
A simple test that plots a bunch of different light curves.
The output should be similar to this figure:
.. figure:: ../pysyzygy/img/feature_test.png
:width: 600px
:align: center
:height: 100px
:alt: alternate text
:figclass: align-center
'''
from __future__ import division, print_function, absolute_import, unicode_literals
import pysyzygy as ps
import numpy as np
import matplotlib.pyplot as pl
if __name__ == '__main__':
fig, ax = pl.subplots(5, 5, figsize = (24,16))
fig.subplots_adjust(wspace = 0, hspace = 0, left = 0.01, right = 0.99, bottom = 0.01, top = 0.99)
ax = ax.flatten()
for axis in ax:
axis.xaxis.set_ticklabels([])
axis.yaxis.set_ticklabels([])
# Different radii
time = np.linspace(-0.5,0.5,1000)
for RpRs in [0.1, 0.09, 0.08]:
trn = ps.Transit(RpRs = RpRs)
ax[0].plot(time, trn(time), label = 'RpRs = %.2f' % RpRs)
ax[0].legend(loc = 'lower left', fontsize = 8)
ax[0].margins(0.,0.2)
ax[0].annotate('DIFFERENT RADII', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Different periods
time = np.linspace(-0.5,0.5,1000)
for per in [10., 20., 30.]:
trn = ps.Transit(per = per)
ax[1].plot(time, trn(time), label = 'per = %.0f' % per)
ax[1].legend(loc = 'lower left', fontsize = 8)
ax[1].margins(0.,0.2)
ax[1].annotate('DIFFERENT PERIODS', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Different impact params
time = np.linspace(-0.5,0.5,1000)
for b in [0., 0.25, 0.5, 0.75, 0.99]:
trn = ps.Transit(b = b)
ax[2].plot(time, trn(time), label = 'b = %.2f' % b)
ax[2].legend(loc = 'lower left', fontsize = 8)
ax[2].margins(0.,0.2)
ax[2].annotate('DIFFERENT IMPACT', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Different densities
time = np.linspace(-0.5,0.5,1000)
for rhos in [1.4, 10.0, 0.1]:
trn = ps.Transit(rhos = rhos)
ax[3].plot(time, trn(time), label = 'rhos = %.2f' % rhos)
ax[3].legend(loc = 'lower left', fontsize = 8)
ax[3].margins(0.,0.2)
ax[3].annotate('DIFFERENT STELLAR DENSITIES', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Different semi-major axes
time = np.linspace(-0.5,0.5,1000)
for aRs in [19.5, 5., 50.]:
trn = ps.Transit(aRs = aRs)
ax[4].plot(time, trn(time), label = 'aRs = %.2f' % aRs)
ax[4].legend(loc = 'lower left', fontsize = 8)
ax[4].margins(0.,0.2)
ax[4].annotate('DIFFERENT SEMI-MAJOR AXES', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Different masses
time = np.linspace(-0.5,0.5,1000)
for MpMs in [0., 0.1, 1.]:
trn = ps.Transit(MpMs = MpMs)
ax[5].plot(time, trn(time), label = 'MpMs = %.2f' % MpMs)
ax[5].legend(loc = 'lower left', fontsize = 8)
ax[5].margins(0.,0.2)
ax[5].annotate('DIFFERENT MASSES', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Different eccentricities
time = np.linspace(-0.5,0.5,1000)
for ecc in [0., 0.25, 0.5, 0.75, 0.9]:
trn = ps.Transit(ecc = ecc)
ax[6].plot(time, trn(time), label = 'ecc = %.2f' % ecc)
ax[6].legend(loc = 'lower left', fontsize = 8)
ax[6].margins(0.,0.2)
ax[6].annotate('DIFFERENT ECCENTRICITIES', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Different omegas
time = np.linspace(-0.5,0.5,1000)
for w in [0., np.pi/8, np.pi/4, np.pi/3, np.pi/2]:
trn = ps.Transit(aRs = 5, ecc = 0.75, w = w)
ax[7].plot(time, trn(time), label = 'w = %.2f' % w)
ax[7].legend(loc = 'lower left', fontsize = 8)
ax[7].margins(0.,0.2)
ax[7].annotate('DIFFERENT OMEGAS', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Different esw/ecw
time = np.linspace(-0.5,0.5,1000)
for esw, ecw in zip([0., 0.25, -0.25, 0.5], [0., -0.25, 0.5, 0.25]):
trn = ps.Transit(aRs = 10, esw = esw, ecw = ecw)
ax[8].plot(time, trn(time), label = 'esw = %.2f, ecw = %.2f' % (esw, ecw))
ax[8].legend(loc = 'lower left', fontsize = 8)
ax[8].margins(0.,0.2)
ax[8].annotate('DIFFERENT esinw/ecosw', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Different limb darkening 1
time = np.linspace(-0.5,0.5,1000)
for u1, u2 in zip([0., 0.25, 0.5, 0.75], [0., 0.25, -0.5, -0.25]):
trn = ps.Transit(aRs = 5, u1 = u1, u2 = u2)
ax[9].plot(time, trn(time), label = 'u1 = %.2f, u2 = %.2f' % (u1, u2))
ax[9].legend(loc = 'upper center', fontsize = 8)
ax[9].margins(0.,0.2)
ax[9].annotate('DIFFERENT LD', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Different limb darkening 2
time = np.linspace(-0.5,0.5,1000)
for q1, q2 in zip([0., 0.25, 0.5, 0.75], [0., 0.5, 0.25, 0.1]):
trn = ps.Transit(aRs = 5, q1 = q1, q2 = q2)
ax[10].plot(time, trn(time), label = 'q1 = %.2f, q2 = %.2f' % (q1, q2))
ax[10].legend(loc = 'upper center', fontsize = 8)
ax[10].margins(0.,0.2)
ax[10].annotate('DIFFERENT LD', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Different t0
time = np.linspace(-0.5,0.5,1000)
for t0 in [-0.1, 0., 0.1]:
trn = ps.Transit(t0 = t0)
ax[11].plot(time, trn(time), label = 't0 = %.2f' % t0)
ax[11].legend(loc = 'lower left', fontsize = 8)
ax[11].margins(0.,0.2)
ax[11].annotate('DIFFERENT t0', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Different times
time = np.linspace(-0.5,0.5,1000)
for times in [[-0.3, 0.3], [-0.4, 0., 0.4], [0.]]:
trn = ps.Transit(times = times)
ax[12].plot(time, trn(time), label = 'times = %s' % times)
ax[12].legend(loc = 'lower left', fontsize = 8)
ax[12].margins(0.,0.2)
ax[12].annotate('DIFFERENT TRANSIT TIMES', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Different exposure times
time = np.linspace(-0.5,0.5,1000)
for exptime in [(1765.5/86400.), 10 * (1765.5/86400.), 0.2 * (1765.5/86400.)]:
trn = ps.Transit(aRs = 5, exptime = exptime)
ax[13].plot(time, trn(time), label = 'exptime = %.3f' % exptime)
ax[13].legend(loc = 'lower left', fontsize = 8)
ax[13].margins(0.,0.2)
ax[13].annotate('DIFFERENT EXP TIMES', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Different number of exposure points
time = np.linspace(-0.5,0.5,1000)
for exppts in [4, 10, 100]:
trn = ps.Transit(aRs = 5, exptime = 0.1, exppts = exppts)
ax[14].plot(time, trn(time), label = 'exppts = %d' % exppts)
ax[14].legend(loc = 'lower left', fontsize = 8)
ax[14].margins(0.,0.2)
ax[14].annotate('DIFFERENT NUMBER OF EXP PTS', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Different binning method
time = np.linspace(-0.5,0.5,1000)
for binmethod in [ps.RIEMANN, ps.TRAPEZOID]:
trn = ps.Transit(aRs = 5, binmethod = binmethod, exppts = 4, exptime = 0.1)
ax[15].plot(time, trn(time), label = 'binmethod = %d' % binmethod)
ax[15].legend(loc = 'lower left', fontsize = 8)
ax[15].margins(0.,0.2)
ax[15].annotate('DIFFERENT BIN METHOD', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Different integration method (curves should be identical)
time = np.linspace(-0.5,0.5,1000)
for intmethod in [ps.SMARTINT, ps.SLOWINT]:
trn = ps.Transit(aRs = 5, intmethod = intmethod, exppts = 4, exptime = 0.1)
ax[16].plot(time, trn(time), label = 'intmethod = %d' % intmethod)
ax[16].legend(loc = 'lower left', fontsize = 8)
ax[16].margins(0.,0.2)
ax[16].annotate('SHOULD BE IDENTICAL', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Different kepler solver (curves should be identical)
time = np.linspace(-0.5,0.5,1000)
for kepsolver in [ps.MDFAST, ps.NEWTON]:
trn = ps.Transit(aRs = 5, ecc = 0.75, w = np.pi/10, kepsolver = kepsolver)
ax[17].plot(time, trn(time), label = 'kepsolver = %d' % kepsolver)
ax[17].legend(loc = 'lower left', fontsize = 8)
ax[17].margins(0.,0.2)
ax[17].annotate('SHOULD BE IDENTICAL', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Binned/unbinned flux
time = np.linspace(-0.5,0.5,1000)
for param in ['binned', 'unbinned']:
trn = ps.Transit(exptime = 0.1)
ax[18].plot(time, trn(time, param = param), label = 'param = %s' % param)
ax[18].legend(loc = 'lower left', fontsize = 8)
ax[18].margins(0.,0.2)
ax[18].annotate('BINNED/UNBINNED', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Mean anomaly / eccentric anomaly / true anomaly
time = np.linspace(-6,6,1000)
trn = ps.Transit(per = 5., ecc = 0.4, fullorbit = True, maxpts = 20000)
for param in ['M', 'E', 'f']:
ax[19].plot(time, trn(time, param = param), label = 'param = %s' % param)
ax[19].legend(loc = 'lower left', fontsize = 8)
ax[19].margins(0.,0.2)
ax[19].annotate('ANOMALIES', xy = (0.05, 0.9), xycoords = 'axes fraction')
# radius / impact parameter
time = np.linspace(-6,6,1000)
trn = ps.Transit(per = 5., ecc = 0.4, fullorbit = True, maxpts = 20000)
for param in ['r', 'b']:
ax[20].plot(time, trn(time, param = param), label = 'param = %s' % param)
ax[20].legend(loc = 'lower left', fontsize = 8)
ax[20].margins(0.,0.2)
ax[20].annotate('r and b', xy = (0.05, 0.9), xycoords = 'axes fraction')
# xyz
time = np.linspace(-5,5,1000)
trn = ps.Transit(ecc = 0.5, w = np.pi/8, b = 0.5, per = 5., fullorbit = True, maxpts = 20000)
ax[21].plot(trn(time, 'x'), trn(time, 'y'), label = 'XY')
ax[21].plot(trn(time, 'x'), trn(time, 'z'), label = 'XZ')
ax[21].plot(trn(time, 'y'), trn(time, 'z'), label = 'YZ')
ax[21].legend(loc = 'lower left', fontsize = 8)
ax[21].margins(0.,0.2)
ax[21].annotate('ORBIT', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Different omega, top view orbit
time = np.linspace(-5,5,1000)
for w in np.arange(4) * np.pi / 4:
trn = ps.Transit(w = w, ecc = 0.5, b = 0., per = 5., fullorbit = True, maxpts = 20000)
ax[22].plot(trn(time, 'x'), trn(time, 'z'), label = r'w = %.0f$^\circ$' % (w * 180./np.pi))
ax[22].axvline(0, alpha = 0.5)
ax[22].axhline(0, alpha = 0.5)
ax[22].legend(loc = 'lower left', fontsize = 8)
ax[22].margins(0.,0.2)
ax[22].annotate('TOP VIEW', xy = (0.05, 0.9), xycoords = 'axes fraction')
# Show the plot
pl.show()
| mit |
iismd17/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
annahs/atmos_research | LEO_pymiecoated_make_lookup.py | 1 | 4811 | from pymiecoated import Mie
import numpy as np
import matplotlib.pyplot as plt
import pickle
import math
import sys
import os
from pprint import pprint
#lookup_dir = 'C:/Users/Sarah Hanna/Documents/Data/Netcare/Spring 2015/lookup tables/'
lookup_dir = 'C:/Users/Sarah Hanna/Documents/Data/Alert Data/lookup_tables/'
os.chdir(lookup_dir)
#************setup********************
wl = 1064 #nm
core_RI = complex(2.26,1.26) #from Moteki 2010 (and Laborde and Taylor)
shell_RI = complex(1.5, 0)
PSL_RI = complex(1.59,0)
scat_cal_limit = 500000
def scat_amp(Itotal):
#calc_scattering_amplitude = 112*Itotal #calibration fit is in origin file for LJ-EC_SP2
#calc_scattering_amplitude = 3.72*Itotal+9.65 #calibration from Aquadag numbers Jason sent for LJ
#calc_scattering_amplitude = 71.32*Itotal # ECSP2 WHI 2010
#calc_scattering_amplitude = 425*Itotal#584*Itotal # UBCSP2 WHI 2012
#calc_scattering_amplitude = 545*Itotal # UBCSP2 POLAR6 2015-spring - 545 from slope of PSL calib plot, 446 from 200nm PSL alone
#calc_scattering_amplitude = (545/2.68)*Itotal # UBCSP2 POLAR6 2015-spring - 545 from slope of PSL calib plot, 446 from 200nm PSL alone, /2.68 to scale to Alert calib scattering
calc_scattering_amplitude = (19060)*Itotal # Alert SP2#44 2012 scattering calib
#calc_scattering_amplitude = (10852)*Itotal # Alert SP2#58 2012 scattering calib
#calc_scattering_amplitude = (4062)*Itotal # Alert SP2#58 2015 scattering calib
#calc_scattering_amplitude = 570*Itotal # UBCSP2 POLAR6 2015-spring - 520=min using uncertainty of scattering calibration, 570 = max
return calc_scattering_amplitude
#**********calc the lookup table*************************
#set the maximum shell radius (note: this is a radius not a thickness)
max_shell_rad = 540 #this is radius of core + shell (in nm)
shell_step_size = 0.5 # will give 1nm steps in final diameter
core_step_size = 0.5 # will give 1nm steps in final diameter
#set the range of core radii to be used
core_rad_range = []
core_radius = 30 #60nm dia
while core_radius <= 120: #max dia
core_rad_range.append(core_radius)
core_radius += core_step_size
lookup_table = {}
#set ranges for integration of scattering
incr = 0.1
range1 = []
i=2.5
while i <=77.5:
range1.append(i)
i+=incr
range2 = []
i=102.5
while i <= 167.5:
range2.append(i)
i+=incr
##
print 'core RI: ', core_RI
print 'shell RI: ', shell_RI
for core_rad in core_rad_range:
print 'core diameter: ',core_rad*2, ' nm'
#for each core size create a dictionary of shell radii and associated scattering amplitudes
shell_data = {}
#start at zero coating thickness
shell_rad = core_rad
while shell_rad < max_shell_rad:
mie = Mie()
mie.x = 2*math.pi*core_rad/wl
mie.m = core_RI
mie.y = 2*math.pi*shell_rad/wl
mie.m2 = shell_RI
#calc intensity
Itot_fwd = 0
Itot_back = 0
for theta in range1:
cos_angle=math.cos(math.radians(theta))
S12 = mie.S12(cos_angle)
I1 = S12[0].real**2 + S12[0].imag**2
I2 = S12[1].real**2 + S12[1].imag**2
Itot_fwd = (Itot_fwd + I1 + I2)
for theta in range2:
cos_angle=math.cos(math.radians(theta))
S12 = mie.S12(cos_angle)
I1 = S12[0].real**2 + S12[0].imag**2
I2 = S12[1].real**2 + S12[1].imag**2
Itot_back = (Itot_back + I1 + I2)
Itot = np.mean([Itot_fwd*incr,Itot_back*incr])*math.pi/2
coating_thickness = (shell_rad-core_rad)
scattering_amplitude = scat_amp(Itot)
if scattering_amplitude <= scat_cal_limit:
shell_data[scattering_amplitude] = coating_thickness
if scattering_amplitude > scat_cal_limit:
break
shell_rad+=shell_step_size
##add in the "negative" coatings:
i=0
neg_radius = core_rad
while i <= 100:
if neg_radius <= 10:
break
neg_radius = neg_radius - core_step_size
shell_radius = neg_radius
mie = Mie()
mie.x = 2*math.pi*neg_radius/wl
mie.m = core_RI
mie.y = 2*math.pi*shell_radius/wl
mie.m2 = shell_RI
Itot_fwd = 0
Itot_back = 0
for theta in range1:
cos_angle=math.cos(math.radians(theta))
S12 = mie.S12(cos_angle)
I1 = S12[0].real**2 + S12[0].imag**2
I2 = S12[1].real**2 + S12[1].imag**2
Itot_fwd = (Itot_fwd + I1 + I2)
for theta in range2:
cos_angle=math.cos(math.radians(theta))
S12 = mie.S12(cos_angle)
I1 = S12[0].real**2 + S12[0].imag**2
I2 = S12[1].real**2 + S12[1].imag**2
Itot_back = (Itot_back + I1 + I2)
Itot = np.mean([Itot_fwd*incr,Itot_back*incr])*math.pi/2
i+=1
coating_thickness = (neg_radius-core_rad)
scattering_amplitude = scat_amp(Itot)
shell_data[scattering_amplitude] = coating_thickness
###
core_diameter = core_rad*2+core_step_size
lookup_table[core_diameter] = shell_data
file = open('coating_lookup_table_Alert_SP244_2012PSLcalib_F19060.lupckl', 'w')
pickle.dump(lookup_table, file)
file.close()
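# Hedged sketch of how the pickled table could later be queried (the target
# amplitude 25000.0 is a hypothetical measured value; nearest-key lookup,
# no interpolation):
#
#   lut = pickle.load(open('coating_lookup_table_Alert_SP244_2012PSLcalib_F19060.lupckl'))
#   shell_data = lut[120.5]                  # core diameter key (2*core_rad + step)
#   amp = min(shell_data, key=lambda s: abs(s - 25000.0))
#   coating_thickness = shell_data[amp]      # nm, relative to the bare core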
| mit |
Code4SA/umibukela | bin/xform.py | 1 | 3733 | import csv
def deep_dict_set(deep_dict, value, layers):
layer = layers[0]
if layers[1:]:
if layer in deep_dict:
deep_dict[layer] = deep_dict_set(deep_dict[layer], value, layers[1:])
else:
deep_dict[layer] = deep_dict_set({}, value, layers[1:])
else:
deep_dict[layer] = value
return deep_dict
def make_row_dict(column_deep_keys, row_values):
row_dict = {}
for col_idx in range(len(column_deep_keys)):
row_dict = deep_dict_set(row_dict, row_values[col_idx], column_deep_keys[col_idx])
return row_dict
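# Sketch (toy values) of how slash-separated column names become nested
# dictionaries via the helpers above:
#
#   keys = [['facility'], ['performance_group', 'clean']]
#   make_row_dict(keys, ['paarl', '4'])
#   # -> {'facility': 'paarl', 'performance_group': {'clean': '4'}}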
class CSVDicts:
def __init__(self, filename):
self.f = open(filename)
self.f = csv.reader(self.f, delimiter=',', quotechar='"')
header = self.f.next()
self.column_deep_keys = map(lambda column: column.split('/'), header)
def __iter__(self):
return self
def next(self):
row = self.f.next()
return make_row_dict(self.column_deep_keys, row)
# l = xform.CSVDicts('SASSA-Citizen_2016_01_19_04_35_51.csv')
# for item in l:
# print item
#
# '{"facility": "paarl", "capturer": "Bukiwe", "surveyor": "H Williams", "meta": {"instanceID": "uuid:26bb3ff3-cafb-41f5-9ef3-5eee2b443192"}, "sim_imei": "354993060885132", "end": "2015-06-02T15:30:36.951+02", "service": "existing_grant_issue", "performance_group": {"respect": "4", "queues": "4", "service_satisfaction": "4", "clean": "4", "information": "4"}, "visit_reason": {"war_veterans": "False", "social_relief": "False", "old_age": "True", "disability": "False", "care_dependency": "False", "in_aid": "False", "foster_child": "False", "child_support": "False"}, "start": "2015-06-02T15:24:29.515+02", "demographics_group": {"gender": "female", "age": "older_60"}, "today": "2015-06-02", "phone_number": "n/a", "yes_no_group": {"documents": "yes", "nametag": "yes", "alt_id": "yes", "bribe": "no"}, "_uuid": "26bb3ff3-cafb-41f5-9ef3-5eee2b443192", "personal_comment": "None", "transport_amount": "twentysix_fifty", "improvements_comment": "n/a", "device_id": "354993060885132", "visit_frequency": "second", "_submission_time": "2015-06-15T12:14:43", "frequency_reason": ":-)no", "service_other": "n/a", "clinic_feedback": "yes", "waiting_time": "thirty_one"}'
#
#...
# today,start,end,device_id,sim_imei,phone_number,facility,surveyor,capturer,visit_reason/not_well,visit_reason/pregnant,visit_reason/emergency,visit_reason/accompanying,visit_reason/regular_collection,visit_reason/other,vist_other,travel_distance,transport_amount,distance,waiting_group/register_time,waiting_group/professional_time,waiting_group/medicine_time,yes_no_group/safety,yes_no_group/examined_private,yes_no_group/consent,yes_no_group/all_medication,yes_no_group/complaint,yes_no_group/complaint_response,personal_comment,performance_group/clean,performance_group/queues,performance_group/respect_admin,performance_group/respect_professionals,performance_group/ambulance,performance_group/equipment,performance_comment,clinic_committee,clinic_committee_function,clinic_feedback,improvements_comment,demographics_group/gender,demographics_group/age,demographics_group/disability,demographics_group/income,meta/instanceID,_uuid,_submission_time
#l = xform.CSVDicts('SASSA-Citizen_2016_01_19_04_35_51.csv')
#r = csv.DictReader(f, delimiter=',', quotechar='"')
import pandas as pd
df = pd.read_csv('DOH-Citizen-Survey_2016_01_25_04_22_15.csv', sep=',', encoding='latin-1')
folweni = df[df['facility'] == 'folweni']
cols = ['facility', 'demographics_group/gender', 'yes_no_group/all_medication']
q1 = folweni.loc[:,cols]
q1groupcount = q1.groupby(['demographics_group/gender', 'yes_no_group/all_medication']).count()
q1groupcount.loc['female','no']
| mit |
jmetzen/scikit-learn | sklearn/utils/tests/test_utils.py | 35 | 9000 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from scipy.linalg import eigh
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex,
assert_greater_equal)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.arpack import eigsh
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.graph import graph_laplacian
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_arpack_eigsh_initialization():
# Non-regression test that shows null-space computation is better with
# initialization of eigsh from [-1,1] instead of [0,1]
random_state = check_random_state(42)
A = random_state.rand(50, 50)
A = np.dot(A.T, A) # create s.p.d. matrix
A = graph_laplacian(A) + 1e-7 * np.identity(A.shape[0])
k = 5
# Test if eigsh is working correctly
# New initialization [-1,1] (as in original ARPACK)
# Was [0,1] before, with which this test could fail
v0 = random_state.uniform(-1,1, A.shape[0])
w, _ = eigsh(A, k=k, sigma=0.0, v0=v0)
# Eigenvalues of s.p.d. matrix should be nonnegative, w[0] is smallest
assert_greater_equal(w[0], 0)
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
    # Check that shuffle does not try to convert to numpy arrays with float
    # dtypes and lets any indexable datastructure pass through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
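    # For reference (illustrative, not asserted elsewhere in this test):
    # gen_even_slices(10, 3) is expected to yield three contiguous slices
    # covering indices 0..9, roughly slice(0, 4), slice(4, 7), slice(7, 10).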
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| bsd-3-clause |
jniediek/mne-python | mne/tests/test_label.py | 3 | 33801 | import os
import os.path as op
import shutil
import glob
import warnings
import numpy as np
from scipy import sparse
from numpy.testing import assert_array_equal, assert_array_almost_equal
from nose.tools import assert_equal, assert_true, assert_false, assert_raises
from mne.datasets import testing
from mne import (read_label, stc_to_label, read_source_estimate,
read_source_spaces, grow_labels, read_labels_from_annot,
write_labels_to_annot, split_label, spatial_tris_connectivity,
read_surface)
from mne.label import Label, _blend_colors, label_sign_flip
from mne.utils import (_TempDir, requires_sklearn, get_subjects_dir,
run_tests_if_main, slow_test)
from mne.fixes import assert_is, assert_is_not
from mne.label import _n_colors
from mne.source_space import SourceSpaces
from mne.source_estimate import mesh_edges
from mne.externals.six import string_types
from mne.externals.six.moves import cPickle as pickle
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
src_fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
stc_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-lh.stc')
real_label_fname = op.join(data_path, 'MEG', 'sample', 'labels',
'Aud-lh.label')
real_label_rh_fname = op.join(data_path, 'MEG', 'sample', 'labels',
'Aud-rh.label')
v1_label_fname = op.join(subjects_dir, 'sample', 'label', 'lh.V1.label')
fwd_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
src_bad_fname = op.join(data_path, 'subjects', 'fsaverage', 'bem',
'fsaverage-ico-5-src.fif')
label_dir = op.join(subjects_dir, 'sample', 'label', 'aparc')
test_path = op.join(op.split(__file__)[0], '..', 'io', 'tests', 'data')
label_fname = op.join(test_path, 'test-lh.label')
label_rh_fname = op.join(test_path, 'test-rh.label')
# This code was used to generate the "fake" test labels:
# for hemi in ['lh', 'rh']:
# label = Label(np.unique((np.random.rand(100) * 10242).astype(int)),
# hemi=hemi, comment='Test ' + hemi, subject='fsaverage')
# label.save(op.join(test_path, 'test-%s.label' % hemi))
# XXX : this was added for backward compat and to keep the old test_label_in_src
def _stc_to_label(stc, src, smooth, subjects_dir=None):
"""Compute a label from the non-zero sources in an stc object.
Parameters
----------
stc : SourceEstimate
The source estimates.
src : SourceSpaces | str | None
The source space over which the source estimates are defined.
        If it's a string it should be the subject name (e.g. fsaverage).
Can be None if stc.subject is not None.
smooth : int
Number of smoothing iterations.
subjects_dir : str | None
Path to SUBJECTS_DIR if it is not set in the environment.
Returns
-------
labels : list of Labels | list of list of Labels
The generated labels. If connected is False, it returns
a list of Labels (one per hemisphere). If no Label is available
in a hemisphere, None is returned. If connected is True,
it returns for each hemisphere a list of connected labels
        ordered in decreasing order of the maximum value in the stc.
        If no Label is available in a hemisphere, an empty list is returned.
"""
src = stc.subject if src is None else src
if isinstance(src, string_types):
subject = src
else:
subject = stc.subject
if isinstance(src, string_types):
subjects_dir = get_subjects_dir(subjects_dir)
surf_path_from = op.join(subjects_dir, src, 'surf')
rr_lh, tris_lh = read_surface(op.join(surf_path_from,
'lh.white'))
rr_rh, tris_rh = read_surface(op.join(surf_path_from,
'rh.white'))
rr = [rr_lh, rr_rh]
tris = [tris_lh, tris_rh]
else:
if not isinstance(src, SourceSpaces):
raise TypeError('src must be a string or a set of source spaces')
if len(src) != 2:
raise ValueError('source space should contain the 2 hemispheres')
rr = [1e3 * src[0]['rr'], 1e3 * src[1]['rr']]
tris = [src[0]['tris'], src[1]['tris']]
labels = []
cnt = 0
for hemi_idx, (hemi, this_vertno, this_tris, this_rr) in enumerate(
zip(['lh', 'rh'], stc.vertices, tris, rr)):
this_data = stc.data[cnt:cnt + len(this_vertno)]
e = mesh_edges(this_tris)
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
e = e + sparse.eye(n_vertices, n_vertices)
clusters = [this_vertno[np.any(this_data, axis=1)]]
cnt += len(this_vertno)
clusters = [c for c in clusters if len(c) > 0]
if len(clusters) == 0:
this_labels = None
else:
this_labels = []
colors = _n_colors(len(clusters))
for c, color in zip(clusters, colors):
idx_use = c
for k in range(smooth):
e_use = e[:, idx_use]
data1 = e_use * np.ones(len(idx_use))
idx_use = np.where(data1)[0]
label = Label(idx_use, this_rr[idx_use], None, hemi,
'Label from stc', subject=subject,
color=color)
this_labels.append(label)
this_labels = this_labels[0]
labels.append(this_labels)
return labels
def assert_labels_equal(l0, l1, decimal=5, comment=True, color=True):
if comment:
assert_equal(l0.comment, l1.comment)
if color:
assert_equal(l0.color, l1.color)
for attr in ['hemi', 'subject']:
attr0 = getattr(l0, attr)
attr1 = getattr(l1, attr)
msg = "label.%s: %r != %r" % (attr, attr0, attr1)
assert_equal(attr0, attr1, msg)
for attr in ['vertices', 'pos', 'values']:
a0 = getattr(l0, attr)
a1 = getattr(l1, attr)
assert_array_almost_equal(a0, a1, decimal)
def test_copy():
"""Test label copying"""
label = read_label(label_fname)
label_2 = label.copy()
label_2.pos += 1
assert_array_equal(label.pos, label_2.pos - 1)
def test_label_subject():
"""Test label subject name extraction
"""
label = read_label(label_fname)
assert_is(label.subject, None)
assert_true('unknown' in repr(label))
label = read_label(label_fname, subject='fsaverage')
assert_true(label.subject == 'fsaverage')
assert_true('fsaverage' in repr(label))
def test_label_addition():
"""Test label addition
"""
pos = np.random.RandomState(0).rand(10, 3)
values = np.arange(10.) / 10
idx0 = list(range(7))
idx1 = list(range(7, 10)) # non-overlapping
idx2 = list(range(5, 10)) # overlapping
l0 = Label(idx0, pos[idx0], values[idx0], 'lh', color='red')
l1 = Label(idx1, pos[idx1], values[idx1], 'lh')
l2 = Label(idx2, pos[idx2], values[idx2], 'lh', color=(0, 1, 0, .5))
assert_equal(len(l0), len(idx0))
l_good = l0.copy()
l_good.subject = 'sample'
l_bad = l1.copy()
l_bad.subject = 'foo'
assert_raises(ValueError, l_good.__add__, l_bad)
assert_raises(TypeError, l_good.__add__, 'foo')
assert_raises(ValueError, l_good.__sub__, l_bad)
assert_raises(TypeError, l_good.__sub__, 'foo')
# adding non-overlapping labels
l01 = l0 + l1
assert_equal(len(l01), len(l0) + len(l1))
assert_array_equal(l01.values[:len(l0)], l0.values)
assert_equal(l01.color, l0.color)
# subtraction
assert_labels_equal(l01 - l0, l1, comment=False, color=False)
assert_labels_equal(l01 - l1, l0, comment=False, color=False)
    # adding overlapping labels
l = l0 + l2
i0 = np.where(l0.vertices == 6)[0][0]
i2 = np.where(l2.vertices == 6)[0][0]
i = np.where(l.vertices == 6)[0][0]
assert_equal(l.values[i], l0.values[i0] + l2.values[i2])
assert_equal(l.values[0], l0.values[0])
assert_array_equal(np.unique(l.vertices), np.unique(idx0 + idx2))
assert_equal(l.color, _blend_colors(l0.color, l2.color))
# adding lh and rh
l2.hemi = 'rh'
# this now has deprecated behavior
bhl = l0 + l2
assert_equal(bhl.hemi, 'both')
assert_equal(len(bhl), len(l0) + len(l2))
assert_equal(bhl.color, l.color)
assert_true('BiHemiLabel' in repr(bhl))
# subtraction
assert_labels_equal(bhl - l0, l2)
assert_labels_equal(bhl - l2, l0)
bhl2 = l1 + bhl
assert_labels_equal(bhl2.lh, l01)
assert_equal(bhl2.color, _blend_colors(l1.color, bhl.color))
assert_array_equal((l2 + bhl).rh.vertices, bhl.rh.vertices) # rh label
assert_array_equal((bhl + bhl).lh.vertices, bhl.lh.vertices)
assert_raises(TypeError, bhl.__add__, 5)
# subtraction
bhl_ = bhl2 - l1
assert_labels_equal(bhl_.lh, bhl.lh, comment=False, color=False)
assert_labels_equal(bhl_.rh, bhl.rh)
assert_labels_equal(bhl2 - l2, l0 + l1)
assert_labels_equal(bhl2 - l1 - l0, l2)
bhl_ = bhl2 - bhl2
assert_array_equal(bhl_.vertices, [])
@testing.requires_testing_data
def test_label_in_src():
"""Test label in src"""
src = read_source_spaces(src_fname)
label = read_label(v1_label_fname)
# construct label from source space vertices
vert_in_src = np.intersect1d(label.vertices, src[0]['vertno'], True)
where = np.in1d(label.vertices, vert_in_src)
pos_in_src = label.pos[where]
values_in_src = label.values[where]
label_src = Label(vert_in_src, pos_in_src, values_in_src,
hemi='lh').fill(src)
# check label vertices
vertices_status = np.in1d(src[0]['nearest'], label.vertices)
vertices_in = np.nonzero(vertices_status)[0]
vertices_out = np.nonzero(np.logical_not(vertices_status))[0]
assert_array_equal(label_src.vertices, vertices_in)
assert_array_equal(np.in1d(vertices_out, label_src.vertices), False)
# check values
value_idx = np.digitize(src[0]['nearest'][vertices_in], vert_in_src, True)
assert_array_equal(label_src.values, values_in_src[value_idx])
# test exception
vertices = np.append([-1], vert_in_src)
assert_raises(ValueError, Label(vertices, hemi='lh').fill, src)
@testing.requires_testing_data
def test_label_io_and_time_course_estimates():
"""Test IO for label + stc files
"""
stc = read_source_estimate(stc_fname)
label = read_label(real_label_fname)
stc_label = stc.in_label(label)
assert_true(len(stc_label.times) == stc_label.data.shape[1])
assert_true(len(stc_label.vertices[0]) == stc_label.data.shape[0])
@testing.requires_testing_data
def test_label_io():
"""Test IO of label files
"""
tempdir = _TempDir()
label = read_label(label_fname)
# label attributes
assert_equal(label.name, 'test-lh')
assert_is(label.subject, None)
assert_is(label.color, None)
# save and reload
label.save(op.join(tempdir, 'foo'))
label2 = read_label(op.join(tempdir, 'foo-lh.label'))
assert_labels_equal(label, label2)
# pickling
dest = op.join(tempdir, 'foo.pickled')
with open(dest, 'wb') as fid:
pickle.dump(label, fid, pickle.HIGHEST_PROTOCOL)
with open(dest, 'rb') as fid:
label2 = pickle.load(fid)
assert_labels_equal(label, label2)
def _assert_labels_equal(labels_a, labels_b, ignore_pos=False):
"""Make sure two sets of labels are equal"""
for label_a, label_b in zip(labels_a, labels_b):
assert_array_equal(label_a.vertices, label_b.vertices)
assert_true(label_a.name == label_b.name)
assert_true(label_a.hemi == label_b.hemi)
if not ignore_pos:
assert_array_equal(label_a.pos, label_b.pos)
@testing.requires_testing_data
def test_annot_io():
"""Test I/O from and to *.annot files"""
# copy necessary files from fsaverage to tempdir
tempdir = _TempDir()
subject = 'fsaverage'
label_src = os.path.join(subjects_dir, 'fsaverage', 'label')
surf_src = os.path.join(subjects_dir, 'fsaverage', 'surf')
label_dir = os.path.join(tempdir, subject, 'label')
surf_dir = os.path.join(tempdir, subject, 'surf')
os.makedirs(label_dir)
os.mkdir(surf_dir)
shutil.copy(os.path.join(label_src, 'lh.PALS_B12_Lobes.annot'), label_dir)
shutil.copy(os.path.join(label_src, 'rh.PALS_B12_Lobes.annot'), label_dir)
shutil.copy(os.path.join(surf_src, 'lh.white'), surf_dir)
shutil.copy(os.path.join(surf_src, 'rh.white'), surf_dir)
# read original labels
assert_raises(IOError, read_labels_from_annot, subject, 'PALS_B12_Lobesey',
subjects_dir=tempdir)
labels = read_labels_from_annot(subject, 'PALS_B12_Lobes',
subjects_dir=tempdir)
# test saving parcellation only covering one hemisphere
parc = [l for l in labels if l.name == 'LOBE.TEMPORAL-lh']
write_labels_to_annot(parc, subject, 'myparc', subjects_dir=tempdir)
parc1 = read_labels_from_annot(subject, 'myparc', subjects_dir=tempdir)
parc1 = [l for l in parc1 if not l.name.startswith('unknown')]
assert_equal(len(parc1), len(parc))
for l1, l in zip(parc1, parc):
assert_labels_equal(l1, l)
# test saving only one hemisphere
parc = [l for l in labels if l.name.startswith('LOBE')]
write_labels_to_annot(parc, subject, 'myparc2', hemi='lh',
subjects_dir=tempdir)
annot_fname = os.path.join(tempdir, subject, 'label', '%sh.myparc2.annot')
assert_true(os.path.isfile(annot_fname % 'l'))
assert_false(os.path.isfile(annot_fname % 'r'))
parc1 = read_labels_from_annot(subject, 'myparc2',
annot_fname=annot_fname % 'l',
subjects_dir=tempdir)
parc_lh = [l for l in parc if l.name.endswith('lh')]
for l1, l in zip(parc1, parc_lh):
assert_labels_equal(l1, l)
@testing.requires_testing_data
def test_read_labels_from_annot():
"""Test reading labels from FreeSurfer parcellation
"""
# test some invalid inputs
assert_raises(ValueError, read_labels_from_annot, 'sample', hemi='bla',
subjects_dir=subjects_dir)
assert_raises(ValueError, read_labels_from_annot, 'sample',
annot_fname='bla.annot', subjects_dir=subjects_dir)
# read labels using hemi specification
labels_lh = read_labels_from_annot('sample', hemi='lh',
subjects_dir=subjects_dir)
for label in labels_lh:
assert_true(label.name.endswith('-lh'))
assert_true(label.hemi == 'lh')
assert_is_not(label.color, None)
# read labels using annot_fname
annot_fname = op.join(subjects_dir, 'sample', 'label', 'rh.aparc.annot')
labels_rh = read_labels_from_annot('sample', annot_fname=annot_fname,
subjects_dir=subjects_dir)
for label in labels_rh:
assert_true(label.name.endswith('-rh'))
assert_true(label.hemi == 'rh')
assert_is_not(label.color, None)
# combine the lh, rh, labels and sort them
labels_lhrh = list()
labels_lhrh.extend(labels_lh)
labels_lhrh.extend(labels_rh)
names = [label.name for label in labels_lhrh]
labels_lhrh = [label for (name, label) in sorted(zip(names, labels_lhrh))]
# read all labels at once
labels_both = read_labels_from_annot('sample', subjects_dir=subjects_dir)
# we have the same result
_assert_labels_equal(labels_lhrh, labels_both)
# aparc has 68 cortical labels
assert_true(len(labels_both) == 68)
# test regexp
label = read_labels_from_annot('sample', parc='aparc.a2009s',
regexp='Angu', subjects_dir=subjects_dir)[0]
assert_true(label.name == 'G_pariet_inf-Angular-lh')
# silly, but real regexp:
label = read_labels_from_annot('sample', 'aparc.a2009s',
regexp='.*-.{4,}_.{3,3}-L',
subjects_dir=subjects_dir)[0]
assert_true(label.name == 'G_oc-temp_med-Lingual-lh')
assert_raises(RuntimeError, read_labels_from_annot, 'sample', parc='aparc',
annot_fname=annot_fname, regexp='JackTheRipper',
subjects_dir=subjects_dir)
@testing.requires_testing_data
def test_read_labels_from_annot_annot2labels():
"""Test reading labels from parc. by comparing with mne_annot2labels
"""
label_fnames = glob.glob(label_dir + '/*.label')
label_fnames.sort()
labels_mne = [read_label(fname) for fname in label_fnames]
labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)
# we have the same result, mne does not fill pos, so ignore it
_assert_labels_equal(labels, labels_mne, ignore_pos=True)
@testing.requires_testing_data
def test_write_labels_to_annot():
"""Test writing FreeSurfer parcellation from labels"""
tempdir = _TempDir()
labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)
# create temporary subjects-dir skeleton
surf_dir = op.join(subjects_dir, 'sample', 'surf')
temp_surf_dir = op.join(tempdir, 'sample', 'surf')
os.makedirs(temp_surf_dir)
shutil.copy(op.join(surf_dir, 'lh.white'), temp_surf_dir)
shutil.copy(op.join(surf_dir, 'rh.white'), temp_surf_dir)
os.makedirs(op.join(tempdir, 'sample', 'label'))
# test automatic filenames
dst = op.join(tempdir, 'sample', 'label', '%s.%s.annot')
write_labels_to_annot(labels, 'sample', 'test1', subjects_dir=tempdir)
assert_true(op.exists(dst % ('lh', 'test1')))
assert_true(op.exists(dst % ('rh', 'test1')))
# lh only
for label in labels:
if label.hemi == 'lh':
break
write_labels_to_annot([label], 'sample', 'test2', subjects_dir=tempdir)
assert_true(op.exists(dst % ('lh', 'test2')))
assert_true(op.exists(dst % ('rh', 'test2')))
# rh only
for label in labels:
if label.hemi == 'rh':
break
write_labels_to_annot([label], 'sample', 'test3', subjects_dir=tempdir)
assert_true(op.exists(dst % ('lh', 'test3')))
assert_true(op.exists(dst % ('rh', 'test3')))
# label alone
assert_raises(TypeError, write_labels_to_annot, labels[0], 'sample',
'test4', subjects_dir=tempdir)
# write left and right hemi labels with filenames:
fnames = [op.join(tempdir, hemi + '-myparc') for hemi in ['lh', 'rh']]
with warnings.catch_warnings(record=True): # specify subject_dir param
for fname in fnames:
write_labels_to_annot(labels, annot_fname=fname)
# read it back
labels2 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
annot_fname=fnames[0])
labels22 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
annot_fname=fnames[1])
labels2.extend(labels22)
names = [label.name for label in labels2]
for label in labels:
idx = names.index(label.name)
assert_labels_equal(label, labels2[idx])
# same with label-internal colors
for fname in fnames:
write_labels_to_annot(labels, 'sample', annot_fname=fname,
overwrite=True, subjects_dir=subjects_dir)
labels3 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
annot_fname=fnames[0])
labels33 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
annot_fname=fnames[1])
labels3.extend(labels33)
names3 = [label.name for label in labels3]
for label in labels:
idx = names3.index(label.name)
assert_labels_equal(label, labels3[idx])
# make sure we can't overwrite things
assert_raises(ValueError, write_labels_to_annot, labels, 'sample',
annot_fname=fnames[0], subjects_dir=subjects_dir)
# however, this works
write_labels_to_annot(labels, 'sample', annot_fname=fnames[0],
overwrite=True, subjects_dir=subjects_dir)
# label without color
labels_ = labels[:]
labels_[0] = labels_[0].copy()
labels_[0].color = None
write_labels_to_annot(labels_, 'sample', annot_fname=fnames[0],
overwrite=True, subjects_dir=subjects_dir)
# duplicate color
labels_[0].color = labels_[2].color
assert_raises(ValueError, write_labels_to_annot, labels_, 'sample',
annot_fname=fnames[0], overwrite=True,
subjects_dir=subjects_dir)
# invalid color inputs
labels_[0].color = (1.1, 1., 1., 1.)
assert_raises(ValueError, write_labels_to_annot, labels_, 'sample',
annot_fname=fnames[0], overwrite=True,
subjects_dir=subjects_dir)
# overlapping labels
labels_ = labels[:]
cuneus_lh = labels[6]
precuneus_lh = labels[50]
labels_.append(precuneus_lh + cuneus_lh)
assert_raises(ValueError, write_labels_to_annot, labels_, 'sample',
annot_fname=fnames[0], overwrite=True,
subjects_dir=subjects_dir)
# unlabeled vertices
labels_lh = [label for label in labels if label.name.endswith('lh')]
write_labels_to_annot(labels_lh[1:], 'sample', annot_fname=fnames[0],
overwrite=True, subjects_dir=subjects_dir)
labels_reloaded = read_labels_from_annot('sample', annot_fname=fnames[0],
subjects_dir=subjects_dir)
assert_equal(len(labels_lh), len(labels_reloaded))
label0 = labels_lh[0]
label1 = labels_reloaded[-1]
assert_equal(label1.name, "unknown-lh")
assert_true(np.all(np.in1d(label0.vertices, label1.vertices)))
# unnamed labels
labels4 = labels[:]
labels4[0].name = None
assert_raises(ValueError, write_labels_to_annot, labels4,
annot_fname=fnames[0])
@requires_sklearn
@testing.requires_testing_data
def test_split_label():
"""Test splitting labels"""
aparc = read_labels_from_annot('fsaverage', 'aparc', 'lh',
regexp='lingual', subjects_dir=subjects_dir)
lingual = aparc[0]
# Test input error
assert_raises(ValueError, lingual.split, 'bad_input_string')
# split with names
parts = ('lingual_post', 'lingual_ant')
post, ant = split_label(lingual, parts, subjects_dir=subjects_dir)
# check output names
assert_equal(post.name, parts[0])
assert_equal(ant.name, parts[1])
# check vertices add up
lingual_reconst = post + ant
lingual_reconst.name = lingual.name
lingual_reconst.comment = lingual.comment
lingual_reconst.color = lingual.color
assert_labels_equal(lingual_reconst, lingual)
# compare output of Label.split() method
post1, ant1 = lingual.split(parts, subjects_dir=subjects_dir)
assert_labels_equal(post1, post)
assert_labels_equal(ant1, ant)
# compare fs_like split with freesurfer split
antmost = split_label(lingual, 40, None, subjects_dir, True)[-1]
fs_vert = [210, 4401, 7405, 12079, 16276, 18956, 26356, 32713, 32716,
32719, 36047, 36050, 42797, 42798, 42799, 59281, 59282, 59283,
71864, 71865, 71866, 71874, 71883, 79901, 79903, 79910, 103024,
107849, 107850, 122928, 139356, 139357, 139373, 139374, 139375,
139376, 139377, 139378, 139381, 149117, 149118, 149120, 149127]
assert_array_equal(antmost.vertices, fs_vert)
# check default label name
assert_equal(antmost.name, "lingual_div40-lh")
# Apply contiguous splitting to DMN label from parcellation in Yeo, 2011
label_default_mode = read_label(op.join(subjects_dir, 'fsaverage', 'label',
'lh.7Networks_7.label'))
DMN_sublabels = label_default_mode.split(parts='contiguous',
subject='fsaverage',
subjects_dir=subjects_dir)
assert_equal([len(label.vertices) for label in DMN_sublabels],
[16181, 7022, 5965, 5300, 823] + [1] * 23)
@slow_test
@testing.requires_testing_data
@requires_sklearn
def test_stc_to_label():
"""Test stc_to_label
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
src = read_source_spaces(fwd_fname)
src_bad = read_source_spaces(src_bad_fname)
stc = read_source_estimate(stc_fname, 'sample')
os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
labels1 = _stc_to_label(stc, src='sample', smooth=3)
labels2 = _stc_to_label(stc, src=src, smooth=3)
assert_equal(len(labels1), len(labels2))
for l1, l2 in zip(labels1, labels2):
assert_labels_equal(l1, l2, decimal=4)
with warnings.catch_warnings(record=True) as w: # connectedness warning
warnings.simplefilter('always')
labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=True,
connected=True)
assert_true(len(w) > 0)
assert_raises(ValueError, stc_to_label, stc, 'sample', smooth=True,
connected=True)
assert_raises(RuntimeError, stc_to_label, stc, smooth=True, src=src_bad,
connected=True)
assert_equal(len(labels_lh), 1)
assert_equal(len(labels_rh), 1)
# test getting tris
tris = labels_lh[0].get_tris(src[0]['use_tris'], vertices=stc.vertices[0])
assert_raises(ValueError, spatial_tris_connectivity, tris,
remap_vertices=False)
connectivity = spatial_tris_connectivity(tris, remap_vertices=True)
assert_true(connectivity.shape[0] == len(stc.vertices[0]))
# "src" as a subject name
assert_raises(TypeError, stc_to_label, stc, src=1, smooth=False,
connected=False, subjects_dir=subjects_dir)
assert_raises(ValueError, stc_to_label, stc, src=SourceSpaces([src[0]]),
smooth=False, connected=False, subjects_dir=subjects_dir)
assert_raises(ValueError, stc_to_label, stc, src='sample', smooth=False,
connected=True, subjects_dir=subjects_dir)
assert_raises(ValueError, stc_to_label, stc, src='sample', smooth=True,
connected=False, subjects_dir=subjects_dir)
labels_lh, labels_rh = stc_to_label(stc, src='sample', smooth=False,
connected=False,
subjects_dir=subjects_dir)
assert_true(len(labels_lh) > 1)
assert_true(len(labels_rh) > 1)
# with smooth='patch'
with warnings.catch_warnings(record=True) as w: # connectedness warning
warnings.simplefilter('always')
labels_patch = stc_to_label(stc, src=src, smooth=True)
assert_equal(len(w), 1)
assert_equal(len(labels_patch), len(labels1))
for l1, l2 in zip(labels1, labels2):
assert_labels_equal(l1, l2, decimal=4)
@slow_test
@testing.requires_testing_data
def test_morph():
"""Test inter-subject label morphing
"""
label_orig = read_label(real_label_fname)
label_orig.subject = 'sample'
# should work for specifying vertices for both hemis, or just the
# hemi of the given label
vals = list()
for grade in [5, [np.arange(10242), np.arange(10242)], np.arange(10242)]:
label = label_orig.copy()
# this should throw an error because the label has all zero values
assert_raises(ValueError, label.morph, 'sample', 'fsaverage')
label.values.fill(1)
label = label.morph(None, 'fsaverage', 5, grade, subjects_dir, 1)
label = label.morph('fsaverage', 'sample', 5, None, subjects_dir, 2)
assert_true(np.in1d(label_orig.vertices, label.vertices).all())
assert_true(len(label.vertices) < 3 * len(label_orig.vertices))
vals.append(label.vertices)
assert_array_equal(vals[0], vals[1])
# make sure label smoothing can run
assert_equal(label.subject, 'sample')
verts = [np.arange(10242), np.arange(10242)]
for hemi in ['lh', 'rh']:
label.hemi = hemi
label.morph(None, 'fsaverage', 5, verts, subjects_dir, 2)
assert_raises(TypeError, label.morph, None, 1, 5, verts,
subjects_dir, 2)
assert_raises(TypeError, label.morph, None, 'fsaverage', 5.5, verts,
subjects_dir, 2)
with warnings.catch_warnings(record=True): # morph map could be missing
label.smooth(subjects_dir=subjects_dir) # make sure this runs
@testing.requires_testing_data
def test_grow_labels():
"""Test generation of circular source labels"""
seeds = [0, 50000]
# these were chosen manually in mne_analyze
should_be_in = [[49, 227], [51207, 48794]]
hemis = [0, 1]
names = ['aneurism', 'tumor']
labels = grow_labels('sample', seeds, 3, hemis, subjects_dir, names=names)
tgt_names = ['aneurism-lh', 'tumor-rh']
tgt_hemis = ['lh', 'rh']
for label, seed, hemi, sh, name in zip(labels, seeds, tgt_hemis,
should_be_in, tgt_names):
assert_true(np.any(label.vertices == seed))
assert_true(np.all(np.in1d(sh, label.vertices)))
assert_equal(label.hemi, hemi)
assert_equal(label.name, name)
# grow labels with and without overlap
seeds = [57532, [58887, 6304]]
l01, l02 = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir)
seeds = [57532, [58887, 6304]]
l11, l12 = grow_labels('fsaverage', seeds, 20, [0, 0], subjects_dir,
overlap=False)
# test label naming
assert_equal(l01.name, 'Label_0-lh')
assert_equal(l02.name, 'Label_1-lh')
assert_equal(l11.name, 'Label_0-lh')
assert_equal(l12.name, 'Label_1-lh')
# make sure set 1 does not overlap
overlap = np.intersect1d(l11.vertices, l12.vertices, True)
assert_array_equal(overlap, [])
# make sure both sets cover the same vertices
l0 = l01 + l02
l1 = l11 + l12
assert_array_equal(l1.vertices, l0.vertices)
@testing.requires_testing_data
def test_label_sign_flip():
"""Test label sign flip computation"""
src = read_source_spaces(src_fname)
label = Label(vertices=src[0]['vertno'][:5], hemi='lh')
src[0]['nn'][label.vertices] = np.array(
[[1., 0., 0.],
[0., 1., 0.],
[0, 0, 1.],
[1. / np.sqrt(2), 1. / np.sqrt(2), 0.],
[1. / np.sqrt(2), 1. / np.sqrt(2), 0.]])
known_flips = np.array([1, 1, np.nan, 1, 1])
    idx = [0, 1, 3, 4]  # indices that are usable (third row is orthogonal)
flip = label_sign_flip(label, src)
# Need the abs here because the direction is arbitrary
assert_array_almost_equal(np.abs(np.dot(flip[idx], known_flips[idx])),
len(idx))
@testing.requires_testing_data
def test_label_center_of_mass():
"""Test computing the center of mass of a label"""
stc = read_source_estimate(stc_fname)
stc.lh_data[:] = 0
vertex_stc = stc.center_of_mass('sample', subjects_dir=subjects_dir)[0]
assert_equal(vertex_stc, 124791)
label = Label(stc.vertices[1], pos=None, values=stc.rh_data.mean(axis=1),
hemi='rh', subject='sample')
vertex_label = label.center_of_mass(subjects_dir=subjects_dir)
assert_equal(vertex_label, vertex_stc)
labels = read_labels_from_annot('sample', parc='aparc.a2009s',
subjects_dir=subjects_dir)
src = read_source_spaces(src_fname)
# Try a couple of random ones, one from left and one from right
# Visually verified in about the right place using mne_analyze
for label, expected in zip([labels[2], labels[3], labels[-5]],
[141162, 145221, 55979]):
label.values[:] = -1
assert_raises(ValueError, label.center_of_mass,
subjects_dir=subjects_dir)
label.values[:] = 1
assert_equal(label.center_of_mass(subjects_dir=subjects_dir), expected)
assert_equal(label.center_of_mass(subjects_dir=subjects_dir,
restrict_vertices=label.vertices),
expected)
# restrict to source space
idx = 0 if label.hemi == 'lh' else 1
# this simple nearest version is not equivalent, but is probably
# close enough for many labels (including the test ones):
pos = label.pos[np.where(label.vertices == expected)[0][0]]
pos = (src[idx]['rr'][src[idx]['vertno']] - pos)
pos = np.argmin(np.sum(pos * pos, axis=1))
src_expected = src[idx]['vertno'][pos]
# see if we actually get the same one
src_restrict = np.intersect1d(label.vertices, src[idx]['vertno'])
assert_equal(label.center_of_mass(subjects_dir=subjects_dir,
restrict_vertices=src_restrict),
src_expected)
assert_equal(label.center_of_mass(subjects_dir=subjects_dir,
restrict_vertices=src),
src_expected)
# degenerate cases
assert_raises(ValueError, label.center_of_mass, subjects_dir=subjects_dir,
restrict_vertices='foo')
assert_raises(TypeError, label.center_of_mass, subjects_dir=subjects_dir,
surf=1)
assert_raises(IOError, label.center_of_mass, subjects_dir=subjects_dir,
surf='foo')
run_tests_if_main()
| bsd-3-clause |
jlegendary/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/datasets/mlcomp.py | 46 | 4089 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
from sklearn.utils import deprecated
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
@deprecated("since the http://mlcomp.org/ website will shut down "
"in March 2017, the load_mlcomp function was deprecated "
"in version 0.19 and will be removed in 0.21.")
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
    the loader will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
| mit |
matthewalbani/scipy | tools/refguide_check.py | 5 | 28531 | #!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a Scipy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings. This is different from doctesting [we do not aim to have
scipy docstrings doctestable!], this is just to make sure that code in
docstrings is valid python::
$ python refguide_check.py --check_docs optimize
"""
from __future__ import print_function
import sys
import os
import re
import copy
import inspect
import warnings
import doctest
import tempfile
import io
import docutils.core
from docutils.parsers.rst import directives
import shutil
import glob
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
from argparse import ArgumentParser, REMAINDER
import numpy as np
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
# Remove sphinx directives that don't run without Sphinx environment
directives._directives.pop('versionadded', None)
directives._directives.pop('versionchanged', None)
directives._directives.pop('moduleauthor', None)
directives._directives.pop('sectionauthor', None)
directives._directives.pop('codeauthor', None)
directives._directives.pop('toctree', None)
BASE_MODULE = "scipy"
PUBLIC_SUBMODULES = [
'cluster',
'cluster.hierarchy',
'cluster.vq',
'constants',
'fftpack',
'fftpack.convolve',
'integrate',
'interpolate',
'io',
'io.arff',
'io.wavfile',
'linalg',
'linalg.blas',
'linalg.lapack',
'linalg.interpolative',
'misc',
'ndimage',
'odr',
'optimize',
'signal',
'sparse',
'sparse.csgraph',
'sparse.linalg',
'spatial',
'spatial.distance',
'special',
'stats',
'stats.mstats',
]
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
'fftpack.convolve': 'fftpack',
'io.wavfile': 'io',
'io.arff': 'io',
}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
'scipy.stats.kstwobign', # inaccurate cdf or ppf
'scipy.stats.levy_stable',
'scipy.special.sinc', # comes from numpy
'scipy.misc.who', # comes from numpy
'io.rst', # XXX: need to figure out how to deal w/ mat files
])
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
r'scipy\.sparse\.csgraph',
r'scipy\.sparse\.linalg',
r'scipy\.spatial\.distance',
r'scipy\.linalg\.blas\.[sdczi].*',
r'scipy\.linalg\.lapack\.[sdczi].*',
]
# these names are not required to be in an autosummary:: listing
# despite being in ALL
REFGUIDE_AUTOSUMMARY_SKIPLIST = [
r'scipy\.special\..*_roots' # old aliases for scipy.special.*_roots
]
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_names(module, names_dict):
# Refguide entries:
#
# - 3 spaces followed by function name, and maybe some spaces, some
# dashes, and an explanation; only function names listed in
# refguide are formatted like this (mostly, there may be some false
# positives)
#
# - special directives, such as data and function
#
# - (scipy.constants only): quoted list
#
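    # For example, an autosummary-style line such as
    #    "   lstsq        -- Solve a least-squares problem"
    # (three leading spaces before the name) would match the first pattern
    # below; the line is illustrative, not copied from a real refguide page.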
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
def get_all_dict(module):
"""Return a copy of the __all__ dict with irrelevant items removed."""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""Return sets of objects only in __all__, refguide, or completely missing."""
only_all = set()
for name in all_dict:
if name not in names:
for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST:
if re.match(pat, module_name + '.' + name):
break
else:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing
def is_deprecated(f):
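    # Call f with a bogus keyword so that a deprecation wrapper, if present,
    # emits its DeprecationWarning before the call itself fails; with warnings
    # escalated to errors that warning is caught below and reported as True.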
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg":None})
except DeprecationWarning:
return True
except:
pass
return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
num_all = len(all_dict)
num_ref = len(names)
output = ""
output += "Non-deprecated objects in __all__: %i\n" % num_all
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
dep_in_ref = set(only_ref).intersection(deprecated)
only_ref = set(only_ref).difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
for name in sorted(deprecated):
output += " " + name + "\n"
if len(only_all) == len(only_ref) == len(missing) == 0:
if dots:
output_dot('.')
return [(None, True, output)]
else:
if len(only_all) > 0:
output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
for name in sorted(only_all):
output += " " + name + "\n"
if len(only_ref) > 0:
output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
for name in sorted(only_ref):
output += " " + name + "\n"
if len(missing) > 0:
output += "ERROR: missing objects::\n\n"
for name in sorted(missing):
output += " " + name + "\n"
if dots:
output_dot('F')
return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
if text is None:
if dots:
output_dot('E')
return False, "ERROR: %s: no documentation" % (name,)
ok_unknown_items = set([
'mod', 'currentmodule', 'autosummary', 'data',
'obj', 'versionadded', 'versionchanged', 'module', 'class',
'ref', 'func', 'toctree', 'moduleauthor',
'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'
])
# Run through docutils
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ("http://foo", name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(
text, token,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
default_role='emphasis',
link_base='',
resolve_name=resolve,
stylesheet_path='',
raw_enabled=0,
file_insertion_enabled=0,
warning_stream=error_stream))
# Print errors, disregarding unimportant ones
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ""
for error in errors:
lines = error.splitlines()
if not lines:
continue
m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if m.group(1) in ok_unknown_items:
continue
m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
if m:
continue
output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
success = False
if not success:
output += " " + "-"*72 + "\n"
for lineno, line in enumerate(text.splitlines()):
output += " %-4d %s\n" % (lineno+1, line)
output += " " + "-"*72 + "\n\n"
if dots:
output_dot('.' if success else 'F')
return success, output
def output_dot(msg='.', stream=sys.stderr):
stream.write(msg)
stream.flush()
def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Returns: [(name, success_flag, output), ...]
"""
try:
skip_types = (dict, str, unicode, float, int)
except NameError:
# python 3
skip_types = (dict, str, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
m = re.search("([\x00-\x09\x0b-\x1f])", text)
if m:
msg = ("Docstring contains a non-printable character %r! "
"Maybe forgot r\"\"\"?" % (m.group(1),))
results.append((full_name, False, msg))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))
return results
### Doctest helpers ####
# the namespace to run examples in
DEFAULT_NAMESPACE = {'np': np}
# the namespace to do checks in
CHECK_NAMESPACE = {
'np': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'matrix': np.matrix,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf,}
class DTRunner(doctest.DocTestRunner):
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
if new_line:
out("\n")
self._item_name = None
def report_start(self, out, test, example):
self._checker._source = example.source
return doctest.DocTestRunner.report_start(self, out, test, example)
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
self._report_item_name(out)
return doctest.DocTestRunner.report_unexpected_exception(
self, out, test, example, exc_info)
def report_failure(self, out, test, example, got):
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
int_pattern = re.compile('^[0-9]+L?$')
vanilla = doctest.OutputChecker()
rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary"}
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', 'ax.axis', 'plt.plot(',
'.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',
'# reformatted'}
def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
if ns is None:
self.ns = dict(CHECK_NAMESPACE)
else:
self.ns = ns
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip stopwords in source
if any(word in self._source for word in self.stopwords):
return True
# skip random stuff
if any(word in want for word in self.rndm_markers):
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# python 2 long integers are equal to python 3 integers
if self.int_pattern.match(want) and self.int_pattern.match(got):
if want.rstrip("L\r\n") == got.rstrip("L\r\n"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(self.ns))
a_got = eval(got, dict(self.ns))
except:
if not self.parse_namedtuples:
return False
# suppose that "want" is a tuple, and "got" is smth like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
try:
num = len(a_want)
regex = ('[\w\d_]+\(' +
', '.join(['[\w\d_]+=(.+)']*num) +
'\)')
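                # For num == 2 the constructed regex is roughly
                # r"[\w\d_]+\([\w\d_]+=(.+), [\w\d_]+=(.+)\)", capturing the
                # field values "10" and "0.1" from the MoodResult example above.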
grp = re.findall(regex, got.replace('\n', ' '))
if len(grp) > 1: # no more than one for now
return False
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return self.check_output(want, got_again, optionflags)
except Exception:
return False
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
# heterog tuple, eg (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except (TypeError, ValueError):
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
        # numpy-comparable objects, strings, and heterogeneous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
def _run_doctests(tests, full_name, verbose, doctest_warnings):
"""Run modified doctests for the set of `tests`.
Returns: list of [(success_flag, output), ...]
"""
flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
output = []
success = True
def out(msg):
output.append(msg)
class MyStderr(object):
"""Redirect stderr to the current stdout"""
def write(self, msg):
if doctest_warnings:
sys.stdout.write(msg)
else:
out(msg)
# Run tests, trying to restore global state afterward
old_printoptions = np.get_printoptions()
old_errstate = np.seterr()
old_stderr = sys.stderr
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
sys.stderr = MyStderr()
try:
os.chdir(tmpdir)
# try to ensure random seed is NOT reproducible
np.random.seed(None)
for t in tests:
t.filename = short_path(t.filename, cwd)
fails, successes = runner.run(t, out=out)
if fails > 0:
success = False
finally:
sys.stderr = old_stderr
os.chdir(cwd)
shutil.rmtree(tmpdir)
np.set_printoptions(**old_printoptions)
np.seterr(**old_errstate)
return success, output
def check_doctests(module, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in docstrings of the module's public symbols.
Returns: list of [(item_name, success_flag, output), ...]
"""
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
# Loop over non-deprecated items
results = []
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
results.append((full_name, False,
"Missing item!\n" +
traceback.format_exc()))
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except:
import traceback
results.append((full_name, False,
"Failed to get doctests!\n" +
traceback.format_exc()))
continue
success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def check_doctests_testfile(fname, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in a text file.
Mimic `check_doctests` above, differing mostly in test discovery.
(which is borrowed from stdlib's doctest.testfile here,
https://github.com/python-git/python/blob/master/Lib/doctest.py)
Returns: list of [(item_name, success_flag, output), ...]
Notes
-----
We also try to weed out pseudocode:
* We maintain a list of exceptions which signal pseudocode,
* We split the text file into "blocks" of code separated by empty lines
and/or intervening text.
* If a block contains a marker, the whole block is then assumed to be
pseudocode. It is then not being doctested.
The rationale is that typically, the text looks like this:
blah
<BLANKLINE>
>>> from numpy import some_module # pseudocode!
>>> func = some_module.some_function
>>> func(42) # still pseudocode
146
<BLANKLINE>
blah
<BLANKLINE>
>>> 2 + 3 # real code, doctest it
5
"""
results = []
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
_, short_name = os.path.split(fname)
if short_name in DOCTEST_SKIPLIST:
return results
full_name = fname
text = open(fname).read()
PSEUDOCODE = set(['some_function', 'some_module', 'import example',
'ctypes.CDLL', # likely need compiling, skip it
                      'integrate.nquad(func,'  # ctypes integrate tutorial
])
# split the text into "blocks" and try to detect and omit pseudocode blocks.
parser = doctest.DocTestParser()
good_parts = []
for part in text.split('\n\n'):
tests = parser.get_doctest(part, ns, fname, fname, 0)
if any(word in ex.source for word in PSEUDOCODE
for ex in tests.examples):
# omit it
pass
else:
# `part` looks like a good code, let's doctest it
good_parts += [part]
# Reassemble the good bits and doctest them:
good_text = '\n\n'.join(good_parts)
tests = parser.get_doctest(good_text, ns, fname, fname, 0)
success, output = _run_doctests([tests], full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def init_matplotlib():
global HAVE_MATPLOTLIB
try:
import matplotlib
matplotlib.use('Agg')
HAVE_MATPLOTLIB = True
except ImportError:
HAVE_MATPLOTLIB = False
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=[],
nargs='*', help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true", help="Run also doctests")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
parser.add_argument("--skip-tutorial", action="store_true",
help="Skip running doctests in the tutorial.")
args = parser.parse_args(argv)
modules = []
names_dict = {}
if args.module_names:
args.skip_tutorial = True
else:
args.module_names = list(PUBLIC_SUBMODULES)
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
module_names = list(args.module_names)
for name in list(module_names):
if name in OTHER_MODULE_DOCS:
name = OTHER_MODULE_DOCS[name]
if name not in module_names:
module_names.append(name)
for submodule_name in module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
module = sys.modules[module_name]
if submodule_name not in OTHER_MODULE_DOCS:
find_names(module, names_dict)
if submodule_name in args.module_names:
modules.append(module)
dots = True
success = True
results = []
print("Running checks for %d modules:" % (len(modules),))
if args.doctests or not args.skip_tutorial:
init_matplotlib()
for module in modules:
if dots:
if module is not modules[0]:
sys.stderr.write(' ')
sys.stderr.write(module.__name__ + ' ')
sys.stderr.flush()
all_dict, deprecated, others = get_all_dict(module)
names = names_dict.get(module.__name__, set())
mod_results = []
mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
mod_results += check_rest(module, set(names).difference(deprecated),
dots=dots)
if args.doctests:
mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
for v in mod_results:
assert isinstance(v, tuple), v
results.append((module, mod_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
if not args.skip_tutorial:
tut_path = os.path.join(os.getcwd(), 'doc', 'source', 'tutorial', '*.rst')
print('\nChecking tutorial files at %s:' % tut_path)
for filename in sorted(glob.glob(tut_path)):
if dots:
sys.stderr.write('\n')
sys.stderr.write(os.path.split(filename)[1] + ' ')
sys.stderr.flush()
tut_results = check_doctests_testfile(filename, (args.verbose >= 2),
dots=dots, doctest_warnings=args.doctest_warnings)
def scratch(): pass # stub out a "module", see below
scratch.__name__ = filename
results.append((scratch, tut_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
all_success = True
for module, mod_results in results:
success = all(x[1] for x in mod_results)
all_success = all_success and success
if success and args.verbose == 0:
continue
print("")
print("=" * len(module.__name__))
print(module.__name__)
print("=" * len(module.__name__))
print("")
for name, success, output in mod_results:
if name is None:
if not success or args.verbose >= 1:
print(output.strip())
print("")
elif not success or (args.verbose >= 2 and output.strip()):
print(name)
print("-"*len(name))
print("")
print(output.strip())
print("")
if all_success:
print("\nOK: refguide and doctests checks passed!")
sys.exit(0)
else:
print("\nERROR: refguide or doctests have errors")
sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
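# Example invocations (a sketch only; the script name and the accepted
# submodule names are assumptions -- they depend on BASE_MODULE and
# PUBLIC_SUBMODULES defined earlier in this file):
#
#     python refguide_check.py --doctests optimize interpolate
#     python refguide_check.py -v --skip-tutorial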
| bsd-3-clause |
SGenheden/Scripts | Plot/plot_multibars.py | 1 | 3660 | # Author: Samuel Genheden, [email protected]
"""
Program to make bar plots of data.
Note: this is a rough utility and does not work well in all cases.
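Example usage (a sketch only; the input file is assumed to be tab-separated,
with a label column followed by alternating value/error column pairs, as
parsed in the __main__ block below; file name and y-label are placeholders):
    python plot_multibars.py -f results.txt -y "Free energy (kcal/mol)" -o bars.png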
"""
import argparse
import numpy as np
import matplotlib.pylab as plt
from sgenlib import colors
from sgenlib import parsing
def _plot_single(data, errors, headers, barlabels, ylabel, fig):
a = fig.add_axes((0.2,0.15,0.75,0.75))
ngroups = data.shape[0]
nitems = len(headers)
print ngroups, nitems
gleft = 0.8*(np.arange(nitems)+1.0)
width = gleft[-1] - gleft[0]
left = np.asarray([gleft+(width + 1.6)*i for i in range(ngroups)]).reshape(ngroups*nitems)
gcolor = [colors.color(i) for i in range(nitems)]
color = []
labels = []
for i in range(ngroups):
color.extend(gcolor)
labels.extend("")
a.bar(left, data.reshape(ngroups*nitems), yerr=errors.reshape(ngroups*nitems),
width=0.8, color=color, error_kw=dict(ecolor='k'), label=headers)
a.set_xticks(left[::2]+width)
a.set_xticklabels(barlabels)
#a.legend()
h, l = a.get_legend_handles_labels()
a.legend(h[0], headers, loc='best', fancybox=True, framealpha=0.5,fontsize=8,labelspacing=0.20)
for tick in a.xaxis.get_major_ticks() :
tick.label.set_fontsize(8)
for tick in a.yaxis.get_major_ticks() :
tick.label.set_fontsize(8)
a.set_ylim([data.min()-3.0,data.max()+3.0])
#for i,h in enumerate(barlabels):
# a.text(left[nitems*i]+0.4,data.min()-6.0,h, size=8)
a.set_ylabel(ylabel, fontsize=8)
a.set_xlabel("Docking pose", fontsize=8)
def _plot_multi(data, errors, headers, barlabels, nrow, ncol, fig):
for i, (idata, ierrors, header) in enumerate(zip(data.T, errors.T, headers), 1):
a = fig.add_subplot(nrow, ncol, i)
left = (np.arange(data.shape[0])+1)*2-0.4
barcolors = [colors.color(i) for i in range(data.shape[0])]
a.bar(left, idata, yerr=ierrors, color=barcolors, error_kw=dict(ecolor='k'))
a.set_xticks(left+0.4)
a.set_xticklabels(barlabels, rotation=45)
a.set_ylabel(header, fontsize=8)
for tick in a.xaxis.get_major_ticks() :
tick.label.set_fontsize(8)
for tick in a.yaxis.get_major_ticks() :
tick.label.set_fontsize(8)
if __name__ == '__main__' :
parser = argparse.ArgumentParser(description="Plot bar plots")
parser.add_argument('-f','--file',help="the data for the bar plots")
parser.add_argument('-y','--ylabel',help="the ylabel", default="")
parser.add_argument('--single', action="store_true", help="plot all in one sub-plot", default=False)
parser.add_argument('-o','--out',help="the output filename",default="bar.png")
args = parser.parse_args()
with open(args.file,"r") as f :
lines = f.readlines()
headers = lines[0].strip().split("\t")[::2]
barlabels = []
data = []
for line in lines[1:]:
cols = line.strip().split("\t")
barlabels.append(cols[0])
data.append(cols[1:])
data = np.array(data, dtype=float)
errors = data[:,1::2]
data = data[:,::2]
print data, errors, headers, barlabels
if args.single :
ncol = 1
nrow = 1
else :
if len(headers) == 2 :
ncol = 1
nrow = 2
else:
ncol = 2
nrow = int(np.ceil(len(headers)/2.0))
widths = [0,3.25,7]
f = plt.figure(0, figsize=(widths[ncol], 2.5*nrow))
if args.single :
_plot_single(data, errors, headers, barlabels, args.ylabel, f)
else :
_plot_multi(data, errors, headers, barlabels, nrow, ncol, f)
#f.tight_layout()
f.savefig(args.out, format="png", dpi=300)
| mit |
mbalasso/mynumpy | numpy/fft/fftpack.py | 9 | 39261 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
__all__ = ['fft','ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \
take
import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache = _fft_cache ):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified." % n)
try:
wsave = fft_cache[n]
except(KeyError):
wsave = init_function(n)
fft_cache[n] = wsave
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0,n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0,s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
return r
def fft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
def ifft(a, n=None, axis=-1):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
a = asarray(a).astype(complex)
if n is None:
n = shape(a)[axis]
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) / n
def rfft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is ``n/2+1``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermite-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n/2+1``.
When ``A = rfft(a)``, ``A[0]`` contains the zero-frequency term, which
must be purely real due to the Hermite symmetry.
If `n` is even, ``A[-1]`` contains the term for frequencies ``n/2`` and
``-n/2``, and must also be purely real. If `n` is odd, ``A[-1]``
contains the term for frequency ``A[(n-1)/2]``, and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
a = asarray(a).astype(float)
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, _real_fft_cache)
def irfft(a, n=None, axis=-1):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermite-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n/2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input (along the axis specified by `axis`).
axis : int, optional
Axis over which to compute the inverse FFT.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where `m` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermite-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache) / n
def hfft(a, n=None, axis=-1):
"""
Compute the FFT of a signal whose spectrum has Hermitian symmetry.
Parameters
----------
a : array_like
The input array.
n : int, optional
The length of the FFT.
axis : int, optional
The axis over which to compute the FFT, assuming Hermitian symmetry
of the spectrum. Default is the last axis.
Returns
-------
out : ndarray
The transformed input.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal is real in the frequency domain and has
Hermite symmetry in the time domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return irfft(conjugate(a), n, axis) * n
def ihfft(a, n=None, axis=-1):
"""
Compute the inverse FFT of a signal whose spectrum has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
axis : int, optional
Axis over which to compute the inverse FFT, assuming Hermitian
symmetry of the spectrum. Default is the last axis.
Returns
-------
out : ndarray
The transformed input.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal is real in the frequency domain and has
Hermite symmetry in the time domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
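Examples
--------
A minimal round-trip sketch for the default (even-length) case, where `a`
is the real half-spectrum; the ``bool`` wrapper only keeps the printed
result stable across versions:
>>> a = np.array([1., 2., 3.])
>>> bool(np.allclose(np.fft.ihfft(np.fft.hfft(a)), a))
True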
"""
a = asarray(a).astype(float)
if n is None:
n = shape(a)[axis]
return conjugate(rfft(a, n, axis))/n
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = range(-len(s), 0)
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = range(len(axes))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii])
return a
def fftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a,s,axes,fft)
def ifftn(a, s=None, axes=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft)
def fft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 5.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 10.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 15.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 20.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a,s,axes,fft)
def ifft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft)
def rfftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
a = asarray(a).astype(float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1])
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii])
return a
def rfft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
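Examples
--------
A small sketch (equivalent to calling `rfftn` over the last two axes); the
last transformed axis has length ``n/2 + 1``:
>>> a = np.ones((3, 4))
>>> np.fft.rfft2(a).shape
(3, 3)
>>> bool(np.allclose(np.fft.rfft2(a), np.fft.rfftn(a, axes=(-2, -1))))
True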
"""
return rfftn(a, s, axes)
def irfftn(a, s=None, axes=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input (along the
axes specified by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where `m` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
of which `ifftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
a = asarray(a).astype(complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii])
a = irfft(a, s[-1], axes[-1])
return a
def irfft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
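Examples
--------
A round-trip sketch; `s` is passed so the original shape (odd or even last
axis) is recovered exactly:
>>> a = np.arange(24.).reshape(4, 6)
>>> bool(np.allclose(np.fft.irfft2(np.fft.rfft2(a), s=a.shape), a))
True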
"""
return irfftn(a, s, axes)
| bsd-3-clause |
daler/metaseq | metaseq/plotutils.py | 3 | 34743 | """
Module with handy utilities for plotting genomic signal
"""
import itertools
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import mlab
import numpy as np
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import gridspec
import colormap_adjust
from scipy import stats
def ci_plot(x, arr, conf=0.95, ax=None, line_kwargs=None, fill_kwargs=None):
"""
Plots the mean and 95% ci for the given array on the given axes
Parameters
----------
x : 1-D array-like
x values for the plot
arr : 2-D array-like
The array to calculate mean and std for
conf : float [.5 - 1]
Confidence interval to use
ax : matplotlib.Axes
The axes object on which to plot
line_kwargs : dict
Additional kwargs passed to Axes.plot
fill_kwargs : dict
Additional kwargs passed to Axes.fill_between
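Examples
--------
A minimal sketch (assumes a non-interactive matplotlib backend such as Agg
and that this module is importable as ``metaseq.plotutils``):
>>> import numpy as np
>>> from metaseq.plotutils import ci_plot
>>> x = np.arange(10)
>>> arr = np.random.rand(25, 10)
>>> ax = ci_plot(x, arr, conf=0.9)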
"""
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
line_kwargs = line_kwargs or {}
fill_kwargs = fill_kwargs or {}
m, lo, hi = ci(arr, conf)
ax.plot(x, m, **line_kwargs)
ax.fill_between(x, lo, hi, **fill_kwargs)
return ax
def imshow(arr, x=None, ax=None, vmin=None, vmax=None, percentile=True,
strip=False, features=None, conf=0.95, sort_by=None,
line_kwargs=None, fill_kwargs=None, imshow_kwargs=None, figsize=(5, 12),
width_ratios=(4, 1), height_ratios=(4, 1),
subplot_params=dict(wspace=0.1, hspace=0.1),
subset_by=None, subset_order=None,):
"""
Do-it-all function to help with plotting heatmaps
Parameters
----------
arr : array-like
x : 1D array
X values to use. If None, use range(arr.shape[1])
ax : matplotlib.Axes
If not None, then only plot the array on the provided axes. This will
ignore any additional arguments provided that apply to figure-level
configuration or to the average line plot. For example, `figsize`,
`width_ratios`, `height_ratios`, `subplot_params`, `line_kwargs`, and
`fill_kwargs` will all be ignored.
vmin, vmax : float
percentile : bool
If True, then treat values for `vmin` and `vmax` as percentiles rather
than absolute values.
strip : bool
Include a strip plot alongside the array
features : pybedtools.BedTool or string filename
Features used to construct the array
conf : float
Confidence interval to use in line plot.
sort_by : array-like
Use the provided array to sort the array (e.g., an array of expression
values). This array will be argsorted to get the proper order.
line_kwargs, fill_kwargs : dict
Passed directly to `ci_plot`.
figsize : tuple
(Width, height) of the figure to create.
imshow_kwargs : dict
Passed directly to matplotlib.pyplot.imshow. By default, arguments
used are `origin='lower'`, `aspect="auto"` and a colormap from
colormap_adjust.smart_colormap generated using the provided `vmin` and
`vmax`.
width_ratios, height_ratios: tuple
These tuples are passed to the `new_shell` function. The default
values set up a 2x2 configuration of panels for heatmap, line plot,
colorbar axes, and optional strip plot. However modifying
`width_ratios` or `height_ratios` can be used to create more or fewer panels.
subplot_params : dict
Passed to Figure.subplots_adjust
subset_by : array
An array of any type (but usually int or str) that contains a class
label for each row in the heatmap array. For example, to subset by
expression, an array of the values "up", "down", or "unchanged" at each
of the positions could be provided.
Note that the heatmap array is first sorted by `sort_by` and then split
into groups according to `subset_by`, so each subset remains sorted by
`sort_by`.
subset_order : list-like
This provides the order in which the subsets are plotted. Since the
default imshow arguments contain `origin="lower"`, these will be
plotted in order starting at the bottom of the heatmap.
"""
if ax is None:
fig = new_shell(
figsize=figsize,
strip=strip,
subplot_params=subplot_params,
width_ratios=width_ratios,
height_ratios=height_ratios)
if x is None:
x = np.arange(arr.shape[1] + 1)
if percentile:
if vmin is None:
vmin = arr.min()
else:
vmin = mlab.prctile(arr.ravel(), vmin)
if vmax is None:
vmax = arr.max()
else:
vmax = mlab.prctile(arr.ravel(), vmax)
else:
if vmin is None:
vmin = arr.min()
if vmax is None:
vmax = arr.max()
cmap = colormap_adjust.smart_colormap(vmin, vmax)
_imshow_kwargs = dict(origin='lower', cmap=cmap, vmin=vmin, vmax=vmax,
aspect='auto')
if imshow_kwargs is not None:
_imshow_kwargs.update(imshow_kwargs)
# previously we did an argsort first; with subsetting we don't want to do
# that yet....
#if sort_by is not None:
# ind = np.argsort(sort_by)
#else:
# ind = np.arange(arr.shape[0])
if sort_by is None:
sort_by = np.arange(arr.shape[0])
if ax is None:
array_ax = fig.array_axes
else:
array_ax = ax
# If not provided, assume all in the same subset.
if subset_by is None:
subset_by = np.zeros(arr.shape[0])
# Ensure always array, since we're doing indexing tricks
if not isinstance(subset_by, np.ndarray):
subset_by = np.array(subset_by)
# If not provided, use sorted order
if subset_order is None:
subset_order = sorted(np.unique(subset_by))
inds = []
for cls in subset_order:
subset_ind = np.nonzero(subset_by == cls)[0]
subset_sort_by = sort_by[subset_ind]
subset_argsort_by = np.argsort(subset_sort_by)
inds.append(subset_ind[subset_argsort_by])
ind = np.concatenate(inds)
mappable = array_ax.imshow(
arr[ind, :],
extent=(x.min(), x.max(), 0, arr.shape[0]),
**_imshow_kwargs
)
if line_kwargs is None:
line_kwargs = {}
if fill_kwargs is None:
fill_kwargs = {}
if isinstance(line_kwargs, dict):
line_kwargs = [line_kwargs]
if isinstance(fill_kwargs, dict):
fill_kwargs = [fill_kwargs]
_line_kwargs = itertools.cycle(line_kwargs)
_fill_kwargs = itertools.cycle(fill_kwargs)
if ax is None:
plt.colorbar(mappable, fig.cax)
for subset_ind, label, _lkw, _fkw in zip(inds, subset_order, _line_kwargs, _fill_kwargs):
ci_plot(
x,
arr[subset_ind],
ax=fig.line_axes,
line_kwargs=_lkw,
fill_kwargs=_fkw,
)
return fig
else:
return ax.figure
def add_labels_to_subsets(ax, subset_by, subset_order, text_kwargs=None,
add_hlines=True, hline_kwargs=None):
"""
Helper function for adding labels to subsets within a heatmap.
Assumes that imshow() was called with `subsets` and `subset_order`.
Parameters
----------
ax : matplotlib.Axes
The axes to label. Generally you can use `fig.array_axes` attribute of
the Figure object returned by `metaseq.plotutils.imshow`.
subset_by, subset_order : array, list
See `metaseq.plotutils.imshow()` docstring; these should be the same
`subsets` and `subset_order` that were provided to that function.
"""
_text_kwargs = dict(transform=ax.get_yaxis_transform())
if text_kwargs:
_text_kwargs.update(text_kwargs)
_hline_kwargs = dict(color='k')
if hline_kwargs:
_hline_kwargs.update(hline_kwargs)
pos = 0
for label in subset_order:
ind = subset_by == label
last_pos = pos
pos += sum(ind)
if add_hlines:
ax.axhline(pos, **_hline_kwargs)
ax.text(
1.1,
last_pos + (pos - last_pos)/2.0,
label,
**_text_kwargs)
def calculate_limits(array_dict, method='global', percentiles=None, limit=()):
"""
Calculate limits for a group of arrays in a flexible manner.
Returns a dictionary of calculated (vmin, vmax), with the same keys as
`array_dict`.
Useful for plotting heatmaps of multiple datasets, and the vmin/vmax values
of the colormaps need to be matched across all (or a subset) of heatmaps.
Parameters
----------
array_dict : dict of np.arrays
method : {'global', 'independent', callable}
If method="global", then use the global min/max values across all
arrays in array_dict. If method="independent", then each array will
have its own min/max calculated. If a callable, then it will be used to
group the keys of `array_dict`, and each group will have its own
group-wise min/max calculated.
percentiles : None or list
If not None, a list of (lower, upper) percentiles in the range [0,100].
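Examples
--------
A minimal sketch of the two built-in methods (assumes this module is
importable as ``metaseq.plotutils``):
>>> import numpy as np
>>> from metaseq.plotutils import calculate_limits
>>> d = {'a': np.array([0., 1.]), 'b': np.array([2., 3.])}
>>> calculate_limits(d, method='global')['a'] == (0.0, 3.0)
True
>>> calculate_limits(d, method='independent')['b'] == (2.0, 3.0)
True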
"""
if percentiles is not None:
for percentile in percentiles:
if not 0 <= percentile <= 100:
raise ValueError("percentile (%s) not between [0, 100]")
if method == 'global':
all_arrays = np.concatenate(
[i.ravel() for i in array_dict.itervalues()]
)
if percentiles:
vmin = mlab.prctile(
all_arrays, percentiles[0])
vmax = mlab.prctile(
all_arrays, percentiles[1])
else:
vmin = all_arrays.min()
vmax = all_arrays.max()
d = dict([(i, (vmin, vmax)) for i in array_dict.keys()])
elif method == 'independent':
d = {}
for k, v in array_dict.iteritems():
d[k] = (v.min(), v.max())
elif hasattr(method, '__call__'):
d = {}
sorted_keys = sorted(array_dict.keys(), key=method)
for group, keys in itertools.groupby(sorted_keys, method):
keys = list(keys)
all_arrays = np.concatenate([array_dict[i] for i in keys])
if percentiles:
vmin = mlab.prctile(
all_arrays, percentiles[0])
vmax = mlab.prctile(
all_arrays, percentiles[1])
else:
vmin = all_arrays.min()
vmax = all_arrays.max()
for key in keys:
d[key] = (vmin, vmax)
return d
def ci(arr, conf=0.95):
"""
Column-wise confidence interval.
Parameters
----------
arr : array-like
conf : float
Confidence interval
Returns
-------
m : array
column-wise mean
lower : array
lower column-wise confidence bound
upper : array
upper column-wise confidence bound
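Examples
--------
A quick property check (a minimal sketch: the first return value is the
column-wise mean, and the bounds bracket it):
>>> import numpy as np
>>> from metaseq.plotutils import ci
>>> arr = np.array([[1., 2.], [3., 4.], [5., 6.]])
>>> m, lo, hi = ci(arr)
>>> bool(np.allclose(m, arr.mean(axis=0)))
True
>>> bool(np.all(lo < m) and np.all(m < hi))
True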
"""
m = arr.mean(axis=0)
n = len(arr)
se = arr.std(axis=0) / np.sqrt(n)
h = se * stats.t._ppf((1 + conf) / 2., n - 1)
return m, m - h, m + h
def nice_log(x):
"""
Uses a log scale but with negative numbers.
:param x: NumPy array
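Example (a minimal sketch):
>>> import numpy as np
>>> from metaseq.plotutils import nice_log
>>> nice_log(np.array([-3., 0., 3.])).tolist()
[-2.0, 0.0, 2.0]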
"""
neg = x < 0
xi = np.log2(np.abs(x) + 1)
xi[neg] = -xi[neg]
return xi
def tip_zscores(a):
"""
Calculates the "target identification from profiles" (TIP) zscores
from Cheng et al. 2011, Bioinformatics 27(23):3221-3227.
:param a: NumPy array, where each row is the signal for a feature.
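Example (a minimal sketch; the returned scores are standardized, so they
have zero mean and unit standard deviation):
>>> import numpy as np
>>> from metaseq.plotutils import tip_zscores
>>> tip_zscores(np.array([[1., 2.], [3., 4.]])).tolist()
[-1.0, 1.0]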
"""
weighted = a * a.mean(axis=0)
scores = weighted.sum(axis=1)
zscores = (scores - scores.mean()) / scores.std()
return zscores
def fdrcorrection(pvals, alpha=0.05, method='indep'):
'''
NOTE: This function was copied from
statsmodels.sandbox.stats.multicomp.fdrcorrection0, from statsmodels
version 0.5.0.
This is to avoid requiring all of statsmodels to be a dependency for
metaseq, just for this function.
pvalue correction for false discovery rate
This covers Benjamini/Hochberg for independent or positively correlated and
Benjamini/Yekutieli for general or negatively correlated tests. Both are
available in the function multipletests, as method=`fdr_bh`, resp.
`fdr_by`.
Parameters
----------
pvals : array_like
set of p-values of the individual tests.
alpha : float
error rate
method : {'indep', 'negcorr'}
Returns
-------
rejected : array, bool
True if a hypothesis is rejected, False if not
pvalue-corrected : array
pvalues adjusted for multiple hypothesis testing to limit FDR
Notes
-----
If there is prior information on the fraction of true hypothesis, then
alpha should be set to alpha * m/m_0 where m is the number of tests, given
by the p-values, and m_0 is an estimate of the true hypothesis. (see
Benjamini, Krieger and Yekutieli)
The two-step method of Benjamini, Krieger and Yekutieli that estimates the
number of false hypotheses will be available (soon).
Method names can be abbreviated to first letter, 'i' or 'p' for fdr_bh and
'n' for fdr_by.
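Examples
--------
A minimal Benjamini/Hochberg sketch (the default ``method='indep'``);
``tolist()`` and rounding keep the printed output stable:
>>> import numpy as np
>>> from metaseq.plotutils import fdrcorrection
>>> rejected, adj = fdrcorrection(np.array([0.001, 0.02, 0.2, 0.7]))
>>> rejected.tolist()
[True, True, False, False]
>>> np.round(adj, 3).tolist()
[0.004, 0.04, 0.267, 0.7]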
'''
pvals = np.asarray(pvals)
pvals_sortind = np.argsort(pvals)
pvals_sorted = pvals[pvals_sortind]
sortrevind = pvals_sortind.argsort()
if method in ['i', 'indep', 'p', 'poscorr']:
ecdffactor = _ecdf(pvals_sorted)
elif method in ['n', 'negcorr']:
cm = np.sum(1./np.arange(1, len(pvals_sorted)+1)) # corrected this
ecdffactor = _ecdf(pvals_sorted) / cm
# elif method in ['n', 'negcorr']:
# cm = np.sum(np.arange(len(pvals)))
# ecdffactor = ecdf(pvals_sorted)/cm
else:
raise ValueError('only indep and negcorr implemented')
reject = pvals_sorted <= ecdffactor*alpha
if reject.any():
rejectmax = max(np.nonzero(reject)[0])
reject[:rejectmax] = True
pvals_corrected_raw = pvals_sorted / ecdffactor
pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
pvals_corrected[pvals_corrected > 1] = 1
return reject[sortrevind], pvals_corrected[sortrevind]
def tip_fdr(a, alpha=0.05):
"""
Returns adjusted TIP p-values for a particular `alpha`.
(see :func:`tip_zscores` for more info)
:param a: NumPy array, where each row is the signal for a feature
:param alpha: False discovery rate
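Example (a minimal shape-level sketch; one adjusted p-value is returned per
feature/row):
>>> import numpy as np
>>> from metaseq.plotutils import tip_fdr
>>> a = np.array([[1., 2., 3.], [2., 4., 6.], [0., 1., 0.]])
>>> tip_fdr(a).shape
(3,)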
"""
zscores = tip_zscores(a)
pvals = stats.norm.pdf(zscores)
rejected, fdrs = fdrcorrection(pvals)
return fdrs
def prepare_logged(x, y):
"""
Transform `x` and `y` to a log scale while dealing with zeros.
This function scales `x` and `y` such that the points that are zero in one
array are set to the min of the other array.
When plotting expression data, frequently one sample will have reads in
a particular feature but the other sample will not. Expression data also
tends to look better on a log scale, but log(0) is undefined and therefore
cannot be shown on a plot. This function allows these points to be shown,
piled up along one side of the plot.
:param x,y: NumPy arrays
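Example (a minimal sketch; the zero in `x` is pinned to the global minimum
of the finite log values, here ``log2(2) == 1``):
>>> import numpy as np
>>> from metaseq.plotutils import prepare_logged
>>> xi, yi = prepare_logged(np.array([0., 4.]), np.array([2., 8.]))
>>> xi.tolist(), yi.tolist()
([1.0, 2.0], [1.0, 3.0])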
"""
xi = np.log2(x)
yi = np.log2(y)
xv = np.isfinite(xi)
yv = np.isfinite(yi)
global_min = min(xi[xv].min(), yi[yv].min())
global_max = max(xi[xv].max(), yi[yv].max())
xi[~xv] = global_min
yi[~yv] = global_min
return xi, yi
def new_shell(figsize=(5, 12), strip=False, height_ratios=(4, 1),
width_ratios=(4, 1), subplot_params=None):
if subplot_params is None:
subplot_params = {}
fig = plt.figure(figsize=figsize)
gs = gridspec.GridSpec(
len(height_ratios),
len(width_ratios),
height_ratios=height_ratios,
width_ratios=width_ratios,
**subplot_params)
fig.array_axes = plt.subplot(gs[0, 0])
if strip:
fig.strip_axes = plt.subplot(gs[0, 1], sharey=fig.array_axes)
fig.line_axes = plt.subplot(gs[1, 0], sharex=fig.array_axes)
fig.cax = plt.subplot(gs[1, 1])
fig.gs = gs
return fig
def matrix_and_line_shell(figsize=(5, 12), strip=False):
"""
Helper function to construct an empty figure that has space for a matrix,
a summary line plot directly below it, a colorbar axis, and an optional
"strip" axis that parallels the matrix (and shares its y-axis) where data
can be added to create callbacks.
Returns a tuple of (fig, matrix_ax, line_ax, strip_ax, colorbar_ax) that
can then be used to plot upon.
:param figsize: Tuple of (width, height), in inches, for the figure to be created.
:param strip: If `strip` is False, then the returned `strip_ax` will be
None and no strip axes will be created.
"""
fig = plt.figure(figsize=figsize)
# Constants to keep track
if strip:
STRIP_COLS = 1
else:
STRIP_COLS = 0
ROWS = 4
COLS = 8 + STRIP_COLS
MAT_COLS = 7
MAT_ROWS = 3
LINE_ROWS = ROWS - MAT_ROWS
mat_ax = plt.subplot2grid(
shape=(ROWS, COLS),
loc=(0, STRIP_COLS),
rowspan=MAT_ROWS,
colspan=MAT_COLS,
)
line_ax = plt.subplot2grid(
shape=(ROWS, COLS),
loc=(MAT_ROWS, STRIP_COLS),
rowspan=LINE_ROWS,
colspan=MAT_COLS,
sharex=mat_ax)
if strip:
strip_ax = plt.subplot2grid(
shape=(ROWS, COLS),
loc=(0, 0),
rowspan=MAT_ROWS,
colspan=STRIP_COLS,
sharey=mat_ax,
)
else:
strip_ax = None
cax = plt.subplot2grid(
shape=(ROWS, COLS),
loc=(ROWS - MAT_ROWS, MAT_COLS + STRIP_COLS),
rowspan=1,
colspan=1,
)
fig.subplots_adjust(hspace=0.1, wspace=0.2, right=0.88, left=0.23)
return fig, mat_ax, line_ax, strip_ax, cax
def clustered_sortind(x, k=10, scorefunc=None):
"""
Uses MiniBatch k-means clustering to cluster matrix into groups.
Each cluster of rows is then sorted by `scorefunc` -- by default, the max
peak height when all rows in a cluster are averaged, or
cluster.mean(axis=0).max().
Returns the index that will sort the rows of `x` and a list of "breaks".
`breaks` is essentially a cumulative row count for each cluster boundary.
In other words, after plotting the array you can use axhline on each
"break" to plot the cluster boundary.
If `k` is a list or tuple, iteratively try each one and select the best
with the lowest mean distance from cluster centers.
:param x: Matrix whose rows are to be clustered
:param k: Number of clusters to create or a list of potential clusters; the
optimum will be chosen from the list
:param scorefunc: Optional function for sorting rows within clusters. Must
accept a single argument of a NumPy array.
"""
try:
from sklearn.cluster import MiniBatchKMeans
except ImportError:
raise ImportError('please install scikits.learn for '
'clustering.')
# If integer, do it once and we're done
if isinstance(k, int):
best_k = k
else:
mean_dists = {}
for _k in k:
mbk = MiniBatchKMeans(init='k-means++', n_clusters=_k)
mbk.fit(x)
mean_dists[_k] = mbk.transform(x).mean()
best_k = sorted(mean_dists.items(), key=lambda x: x[1])[-1][0]
mbk = MiniBatchKMeans(init='k-means++', n_clusters=best_k)
mbk.fit(x)
k = best_k
labels = mbk.labels_
scores = np.zeros(labels.shape, dtype=float)
if not scorefunc:
def scorefunc(x):
return x.mean(axis=0).max()
for label in range(k):
ind = labels == label
score = scorefunc(x[ind, :])
scores[ind] = score
pos = 0
breaks = []
ind = np.argsort(scores)
for k, g in itertools.groupby(labels[ind]):
pos += len(list(g))
breaks.append(pos)
return ind, breaks
def new_clustered_sortind(x, k=10, row_key=None, cluster_key=None):
"""
Uses MiniBatch k-means clustering to cluster matrix into groups.
Each cluster of rows is then sorted by `scorefunc` -- by default, the max
peak height when all rows in a cluster are averaged, or
cluster.mean(axis=0).max().
Returns the index that will sort the rows of `x` and a list of "breaks".
`breaks` is essentially a cumulative row count for each cluster boundary.
In other words, after plotting the array you can use axhline on each
"break" to plot the cluster boundary.
If `k` is a list or tuple, iteratively try each one and select the best
with the lowest mean distance from cluster centers.
:param x: Matrix whose rows are to be clustered
:param k: Number of clusters to create or a list of potential clusters; the
optimum will be chosen from the list
:param row_key:
Optional function to act as a sort key for sorting rows within
clusters. Signature should be `scorefunc(a)` where `a` is a 1-D NumPy
array.
:param cluster_key:
Optional function for sorting clusters. Signature is `clusterfunc(a)`
where `a` is a NumPy array containing all rows of `x` for cluster `i`.
It must return a single value.
"""
try:
from sklearn.cluster import MiniBatchKMeans
except ImportError:
raise ImportError('please install scikits.learn for '
'clustering.')
# If integer, do it once and we're done
if isinstance(k, int):
best_k = k
else:
mean_dists = {}
for _k in k:
mbk = MiniBatchKMeans(init='k-means++', n_clusters=_k)
mbk.fit(x)
mean_dists[_k] = mbk.transform(x).mean()
best_k = sorted(mean_dists.items(), key=lambda x: x[1])[-1][0]
mbk = MiniBatchKMeans(init='k-means++', n_clusters=best_k)
mbk.fit(x)
k = best_k
labels = mbk.labels_
scores = np.zeros(labels.shape, dtype=float)
if cluster_key:
# It's easier for calling code to provide something that operates on
# a cluster level, but here it's converted to work on a label level
# that looks in to the array `x`.
def _cluster_key(i):
return cluster_key(x[labels == i, :])
sorted_labels = sorted(range(k), key=_cluster_key)
else:
# Otherwise just use them as-is.
sorted_labels = range(k)
if row_key:
# Again, easier to provide a function to operate on a row. But here we
# need it to accept an index
def _row_key(i):
return row_key(x[i, :])
final_ind = []
breaks = []
pos = 0
for label in sorted_labels:
# which rows in `x` have this label
label_inds = np.nonzero(labels == label)[0]
if row_key:
label_sort_ind = sorted(label_inds, key=_row_key)
else:
label_sort_ind = label_inds
for li in label_sort_ind:
final_ind.append(li)
pos += len(label_inds)
breaks.append(pos)
return np.array(final_ind), np.array(breaks)
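# Editor's note: another illustrative sketch (not in the original source)
# showing the `row_key` / `cluster_key` hooks of `new_clustered_sortind`.
def _example_new_clustered_sortind():
    """
    Minimal sketch: order clusters by their mean signal, order rows within
    each cluster by their own mean, and report the cluster boundaries.
    """
    x = np.random.rand(50, 20)
    ind, breaks = new_clustered_sortind(
        x, k=4,
        row_key=lambda row: row.mean(),
        cluster_key=lambda cluster: cluster.mean())
    return ind, breaks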
def input_ip_plots(iparr, inputarr, diffed, x, sort_ind,
prefix=None, limits1=(None, None), limits2=(None, None),
hlines=None, vlines=None):
"""
All-in-one plotting function to make a 5-panel figure.
Panels are IP, input, and diffed; plus 2 line plots showing averages.
:param iparr, inputarr: NumPy arrays constructed by a genomic_signal object
:param diffed: Difference of `iparr` and `inputarr`, but can be some other
transformation.
:param x: Extent to use -- for TSSs, maybe something like
np.linspace(-1000, 1000, bins), or for just bin IDs, something like
`np.arange(bins)`.
:param sort_ind: row order for each of the 3 panels -- usually interesting
to use `clustered_sortind` or `tip_zscores`
    :param prefix: Used to prefix plot titles, e.g. "%(prefix)s IP"
    :param limits1: Tuple passed to the Normalize function for IP and input.
    :param limits2: Tuple passed to the Normalize function for the diffed array
:param hlines: List of (position, kwarg) tuples for plotting horizontal
lines. Kwargs are passed directly to axhline. Useful for delimiting
clusters, if you used `clustered_sortind` and have both `row_order` and
`breaks`.
:param vlines: List of (position, kwargs) tuples. A vertical line will be
plotted at each position using kwargs.
"""
# global min and max
gmin = min(iparr.min(), inputarr.min())
gmax = max(iparr.max(), inputarr.max())
fig = plt.figure(figsize=(10, 10))
    # 3 arrays, 2 line plots, and 2 colorbars. Plots share the
# axes that make sense
#
# 3 arrays
ax1 = plt.subplot2grid(
(9, 9), (0, 0), colspan=3, rowspan=6)
ax2 = plt.subplot2grid(
(9, 9), (0, 3), colspan=3, rowspan=6, sharex=ax1, sharey=ax1)
ax3 = plt.subplot2grid(
(9, 9), (0, 6), colspan=3, rowspan=6, sharex=ax1, sharey=ax1)
# 2 line plots
ax4 = plt.subplot2grid((9, 9), (6, 3), colspan=3, rowspan=3, sharex=ax1)
ax5 = plt.subplot2grid((9, 9), (6, 6), colspan=3, rowspan=3, sharex=ax1)
# 2 colorbars
cax1 = plt.Axes(fig, rect=(0.05, 0.25, 0.25, 0.025))
cax2 = plt.Axes(fig, rect=(0.05, 0.15, 0.25, 0.025))
# For nice imshow axes
extent = (min(x), max(x), 0, diffed.shape[0])
cm = matplotlib.cm.gist_gray
cm.set_bad('k')
cm.set_over('r')
cm.set_under('b')
limits1 = list(limits1)
limits2 = list(limits2)
all_base = np.column_stack((iparr.ravel(), inputarr.ravel())).ravel()
if limits1[0] is None:
limits1[0] = mlab.prctile(
all_base, 1. / all_base.size)
if limits1[1] is None:
limits1[1] = mlab.prctile(
all_base, 100 - 1. / all_base.size)
if limits2[0] is None:
limits2[0] = mlab.prctile(
diffed.ravel(), 1. / all_base.size)
if limits2[1] is None:
limits2[1] = mlab.prctile(
diffed.ravel(), 100 - 1. / all_base.size)
del all_base
imshow_kwargs = dict(
interpolation='nearest',
aspect='auto',
cmap=cm,
norm=matplotlib.colors.Normalize(*limits1),
extent=extent,
origin='lower')
# modify kwargs for diffed (by changing the normalization)
diffed_kwargs = imshow_kwargs.copy()
diffed_kwargs['norm'] = matplotlib.colors.Normalize(*limits2)
# IP
mappable1 = ax1.imshow(iparr[sort_ind, :], **imshow_kwargs)
# input
mappable2 = ax2.imshow(inputarr[sort_ind, :], **imshow_kwargs)
# diffed
mappable3 = ax3.imshow((diffed)[sort_ind, :], **diffed_kwargs)
# IP and input line plot with vertical line
ax4.plot(x, inputarr.mean(axis=0), color='k', linestyle='--',
label='input')
ax4.plot(x, iparr.mean(axis=0), color='k', label='ip')
ax4.axvline(0, color='k', linestyle=':')
# Diffed line plot with vertical line
ax5.plot(x, diffed.mean(axis=0), 'k', label='enrichment')
ax5.axvline(0, color='k', linestyle=':')
# Colorbars
cbar1 = fig.colorbar(mappable1, cax1, orientation='horizontal')
cbar2 = fig.colorbar(mappable3, cax2, orientation='horizontal')
fig.add_axes(cax1)
fig.add_axes(cax2)
# labeling...
ax1.set_ylabel('features')
plt.setp(ax2.get_yticklabels(), visible=False)
plt.setp(ax3.get_yticklabels(), visible=False)
ax4.set_xlabel('bp')
ax4.set_ylabel('mean reads per million mapped reads')
ax5.set_xlabel('bp')
cax1.set_xlabel('Reads per million mapped reads')
cax2.set_xlabel('Enrichment (RPMMR)')
if prefix is None:
prefix = ""
ax1.set_title('%s IP' % prefix)
ax2.set_title('%s input' % prefix)
ax3.set_title('Difference')
# diffed line plot should have y ax on right
ax5.yaxis.set_ticks_position('right')
ax5.yaxis.set_label_position('right')
ax5.set_ylabel('enriched reads per million mapped reads')
# Legends
ax4.legend(loc='best', frameon=False)
ax5.legend(loc='best', frameon=False)
# Make sure everybody snaps to xmin/xmax
for ax in [ax1, ax2, ax3, ax4, ax5]:
ax.axis(xmin=extent[0], xmax=extent[1])
if not hlines:
hlines = []
if not vlines:
vlines = []
for ax in [ax1, ax2, ax3]:
for pos, kwargs in hlines:
ax.axhline(pos, **kwargs)
for pos, kwargs in vlines:
ax.axvline(pos, **kwargs)
fig.subplots_adjust(bottom=0.05, top=0.95, hspace=0.75, wspace=0.9)
return fig
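# Editor's note: illustrative call pattern for `input_ip_plots` (added for
# clarity; the log2-ratio transform, bin extent and cluster count below are
# assumptions, not taken from the original source).
def _example_input_ip_plots(iparr, inputarr):
    """
    Minimal sketch: plot IP vs. input with a log2-ratio panel, with rows
    ordered by a clustered sort index and cluster boundaries drawn.
    """
    diffed = np.log2((iparr + 1.) / (inputarr + 1.))
    x = np.linspace(-1000, 1000, iparr.shape[1])
    sort_ind, breaks = clustered_sortind(diffed, k=4)
    hlines = [(b, dict(color='y', linewidth=1)) for b in breaks[:-1]]
    return input_ip_plots(iparr, inputarr, diffed, x, sort_ind, hlines=hlines)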
def _updatecopy(orig, update_with, keys=None, override=False):
"""
    Return a copy of `orig` updated with items from `update_with`. If `keys`
    is a list, only those keys are considered. Existing keys in `orig` are
    preserved unless `override` is True.
"""
d = orig.copy()
if keys is None:
keys = update_with.keys()
for k in keys:
if k in update_with:
if k in d and not override:
continue
d[k] = update_with[k]
return d
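# Editor's note: a small behavioral sketch for `_updatecopy` (added for
# illustration; the dictionaries below are arbitrary examples).
def _example_updatecopy():
    base = dict(color='k', linewidth=1)
    extra = dict(color='r', alpha=0.5)
    kept = _updatecopy(base, extra)                  # color stays 'k'; alpha added
    forced = _updatecopy(base, extra, override=True)  # color becomes 'r'
    only_alpha = _updatecopy(base, extra, keys=['alpha'])  # only alpha merged
    return kept, forced, only_alpha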
def _clean(z):
"""
Return a version of z that only has finite values
"""
return z[np.isfinite(z)]
class MarginalHistScatter(object):
def __init__(self, ax, hist_size=0.6, pad=0.05):
"""
        Class to enable incremental appending of scatterplots, each of which
        generates additional marginal histograms.
"""
self.scatter_ax = ax
self.fig = ax.figure
self.divider = make_axes_locatable(self.scatter_ax)
self.top_hists = []
self.right_hists = []
self.hist_size = hist_size
self.pad = pad
self.xfirst_ax = None
self.yfirst_ax = None
# will hold histogram data
self.hxs = []
self.hys = []
@property
def xmax(self):
return self.scatter_ax.dataLim.xmax
@property
def ymax(self):
return self.scatter_ax.dataLim.ymax
@property
def xmin(self):
return self.scatter_ax.dataLim.xmin
@property
def ymin(self):
return self.scatter_ax.dataLim.ymin
@property
def limits(self):
return (self.xmin, self.xmax, self.ymin, self.ymax)
def append(self, x, y, scatter_kwargs, hist_kwargs=None, xhist_kwargs=None,
yhist_kwargs=None, num_ticks=3, labels=None, hist_share=False,
marginal_histograms=True):
"""
        Adds a new scatter to self.scatter_ax as well as marginal histograms
        for the same data, borrowing additional room from the axes.
Parameters
----------
x, y : array-like
Data to be plotted
scatter_kwargs : dict
Keyword arguments that are passed directly to scatter().
hist_kwargs : dict
Keyword arguments that are passed directly to hist(), for both the
top and side histograms.
xhist_kwargs, yhist_kwargs : dict
Additional, margin-specific kwargs for the x or y histograms
respectively. These are used to update `hist_kwargs`
num_ticks : int
How many tick marks to use in each histogram's y-axis
labels : array-like
Optional NumPy array of labels that will be set on the collection
so that they can be accessed by a callback function.
hist_share : bool
If True, then all histograms will share the same frequency axes.
Useful for showing relative heights if you don't want to use the
hist_kwarg `normed=True`
marginal_histograms : bool
Set to False in order to disable marginal histograms and just use
as a normal scatterplot.
"""
scatter_kwargs = scatter_kwargs or {}
hist_kwargs = hist_kwargs or {}
xhist_kwargs = xhist_kwargs or {}
yhist_kwargs = yhist_kwargs or {}
yhist_kwargs.update(dict(orientation='horizontal'))
# Plot the scatter
coll = self.scatter_ax.scatter(x, y, **scatter_kwargs)
coll.labels = labels
if not marginal_histograms:
return
xhk = _updatecopy(hist_kwargs, xhist_kwargs)
yhk = _updatecopy(hist_kwargs, yhist_kwargs)
axhistx = self.divider.append_axes(
'top', size=self.hist_size,
pad=self.pad, sharex=self.scatter_ax, sharey=self.xfirst_ax)
axhisty = self.divider.append_axes(
'right', size=self.hist_size,
pad=self.pad, sharey=self.scatter_ax, sharex=self.yfirst_ax)
axhistx.yaxis.set_major_locator(
MaxNLocator(nbins=num_ticks, prune='both'))
axhisty.xaxis.set_major_locator(
MaxNLocator(nbins=num_ticks, prune='both'))
if not self.xfirst_ax and hist_share:
self.xfirst_ax = axhistx
if not self.yfirst_ax and hist_share:
self.yfirst_ax = axhisty
# Keep track of which axes are which, because looking into fig.axes
# list will get awkward....
self.top_hists.append(axhistx)
self.right_hists.append(axhisty)
# Scatter will deal with NaN, but hist will not. So clean the data
# here.
hx = _clean(x)
hy = _clean(y)
self.hxs.append(hx)
self.hys.append(hy)
# Only plot hists if there's valid data
if len(hx) > 0:
if len(hx) == 1:
_xhk = _updatecopy(orig=xhk, update_with=dict(bins=[hx[0], hx[0]]), keys=['bins'])
axhistx.hist(hx, **_xhk)
else:
axhistx.hist(hx, **xhk)
if len(hy) > 0:
if len(hy) == 1:
_yhk = _updatecopy(orig=yhk, update_with=dict(bins=[hy[0], hy[0]]), keys=['bins'])
axhisty.hist(hy, **_yhk)
else:
axhisty.hist(hy, **yhk)
# Turn off unnecessary labels -- for these, use the scatter's axes
# labels
for txt in axhisty.get_yticklabels() + axhistx.get_xticklabels():
txt.set_visible(False)
for txt in axhisty.get_xticklabels():
txt.set_rotation(-90)
def add_legends(self, xhists=True, yhists=False, scatter=True, **kwargs):
"""
Add legends to axes.
"""
axs = []
        # self.hxs/self.hys hold histogram *data*; the legend-bearing axes are
        # the marginal histogram axes and the scatter axes themselves.
        if xhists:
            axs.extend(self.top_hists)
        if yhists:
            axs.extend(self.right_hists)
        if scatter:
            axs.append(self.scatter_ax)
for ax in axs:
ax.legend(**kwargs)
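# Editor's note: illustrative usage of `MarginalHistScatter` (added for
# clarity; the random data and keyword choices below are assumptions).
def _example_marginal_hist_scatter():
    """
    Minimal sketch: two overlaid scatters on one axes, each adding its own
    top and right marginal histograms.
    """
    fig, ax = plt.subplots()
    mhs = MarginalHistScatter(ax, hist_size=0.8, pad=0.1)
    x1, y1 = np.random.randn(200), np.random.randn(200)
    x2, y2 = np.random.randn(200) + 2, np.random.randn(200) + 2
    mhs.append(x1, y1, scatter_kwargs=dict(color='k', alpha=0.5),
               hist_kwargs=dict(bins=20, color='k', alpha=0.5))
    mhs.append(x2, y2, scatter_kwargs=dict(color='r', alpha=0.5),
               hist_kwargs=dict(bins=20, color='r', alpha=0.5))
    return fig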
| mit |
jameshicks/basketballdatabase | basketballdatabase/player.py | 1 | 6634 | from itertools import ifilter
from datetime import timedelta, datetime
import pandas as pd
import numpy as np
import requests
from bs4 import BeautifulSoup
from common import throttled, relativeurl_to_absolute, get_backtobacks
from common import consolidate_dfs, search_player
from common import ParseError
####
minimum_update_interval = timedelta(hours=10)
class Player(object):
def __init__(self, name):
self.name = name
self.updated_name = False
self.gamelog = None
self.last_updated = datetime.min
self._player_url = search_player(self.name)
def __repr__(self):
return 'Player: {}'.format(self.name)
def update(self, force=False):
if (not force) and (datetime.now() - self.last_updated) < minimum_update_interval:
#self._gamelog = self.__get_gamelog()
return
        if self.gamelog is None:
            # First call: nothing cached yet, so fetch everything.
            self.gamelog = self.__get_gamelog()
        else:
            # Only re-fetch the most recent season; earlier seasons are kept.
            curseason = self.gamelog.Season.max()
            prevseasons = self.gamelog[self.gamelog.Season < curseason]
            newseasons = self.__get_gamelog(after=curseason)
            self.gamelog = consolidate_dfs([prevseasons, newseasons])
        self.gamelog.sort('Date', inplace=True)
        self.gamelog.set_index('Date', drop=False, inplace=True)
def clear(self):
self.gamelog = None
        self.last_updated = datetime.min
def apply_statistic(self, name, func):
self.gamelog[name] = func(self.gamelog)
def __links_to_player_season_gamelogs(self, after=None):
pg = requests.get(self._player_url)
pg.raise_for_status()
soup = BeautifulSoup(pg.text)
# Update the player name
if not self.updated_name:
self.name = str(soup.find('h1').string)
self.updated_name = True
links = (link for link in soup.find_all('a')
if link.get('href') is not None
and 'gamelog' in link.get('href'))
if after:
links = ifilter(lambda x: x.text >= after, links)
links = sorted(set(links))
for link in links:
yield relativeurl_to_absolute(link.get('href'))
def __get_gamelog(self, after=None):
playerpage = self._player_url
seasons = consolidate_dfs(
self.get_player_season_data_from_url(url)
for url in self.__links_to_player_season_gamelogs(after=after))
self.last_updated = datetime.now()
seasons.sort('Date', inplace=True)
return seasons
@throttled
def get_player_season_data_from_url(self, url):
pg = requests.get(url)
soup = BeautifulSoup(pg.text)
basicstats = self.process_gamelog(soup.find(id='pgl_basic'))
advanced = self.process_gamelog(soup.find(id='pgl_advanced'))
stats = self.merge_basic_and_advanced(basicstats, advanced)
stats['Player'] = self.name
stats['Playoff'] = 0
# Find the season
season = soup.find(lambda x: x.name =='h1' and 'Game Log' in x.string)
if not season:
raise ParseError("Couldn't determine season!")
season = season.string.split()[-3]
stats['Season'] = season
if not soup.find(id='pgl_basic_playoffs'):
return stats
else:
playoffbasic = self.process_gamelog(soup.find(id='pgl_basic_playoffs'))
playoffadv = self.process_gamelog(soup.find(id='pgl_advanced_playoffs'))
playoffstats = self.merge_basic_and_advanced(playoffbasic, playoffadv)
playoffstats['Playoff'] = 1
playoffstats['Season'] = season
stats = pd.concat([stats, playoffstats], axis=0)
return stats
def merge_basic_and_advanced(self,basic, advanced):
advcols = ['Date'] + [x for x in advanced.columns if x not in basic.columns]
a = advanced[advcols]
return pd.merge(basic, a, on='Date')
def process_gamelog(self, tag):
# Functions for cleaning the data
def delete_multiheader(table):
'''
Delete the extra header that shows up every 20 rows
or so in basketball-reference.com data
'''
for row in table.find_all('tr', class_='thead'):
row.decompose()
return table
def duration_fixer(duration):
try:
mins, secs = [int(x) for x in duration.split(':')]
return timedelta(minutes=mins, seconds=secs)
except:
return 0
def margin_parser(margin):
winloss, margin = margin.split()
margin = str(margin).translate(None, '()+')
return int(margin)
def age_parser(age):
y, d = age.split('-')
return int(y) + float(d) / 365.242
def fix_gs(gs):
if gs in {'0', '1', 0, 1}:
return int(gs)
else:
return np.nan
tag = delete_multiheader(tag)
# There should only be one table in the tag
df = pd.io.html.read_html(str(tag))[0]
# Force date to a pandas datetime object
df.Date = pd.to_datetime(df.Date, coerce=True)
# Minutes played gets interpreted as a string
df.MP = pd.to_timedelta([duration_fixer(x) for x in df.MP])
# Decimalized minutes played
df['MPd'] = [x.total_seconds() / 60 for x in df.MP]
# Give names to the unnamed columns
df.rename(columns = { u'Unnamed: 5': 'Away', u'Unnamed: 7': 'Margin' },
inplace=True)
        # The 'Away' column has an '@' if the game is away, else nothing
df.Away = np.array([(pd.notnull(x) and x == '@') for x in df.Away], dtype=np.uint8)
# Get the win/loss margin in numeric format
df.Margin = [margin_parser(margin) for margin in df.Margin]
# Age is given as Years-Days, I'll convert to decimal
df.Age = [age_parser(age) for age in df.Age]
df.GS = [fix_gs(gs) for gs in df.GS]
# Get 2P statistics for basic stats
# True shooting percentage (TS%) is not in the basic stats
if 'TS%' not in df.columns:
df['2P'] = df['FG'] - df['3P']
df['2PA'] = df['FGA'] - df['3PA']
df['2P%'] = df['2P'] / df['2PA']
# A column for when a player is on a back-to-back
df['back2back'] = np.array(get_backtobacks(df.Date), dtype=np.uint8)
# Set the index to the date
df = df.set_index('Date', drop=False)
return df
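# Editor's note: illustrative sketch added for clarity (not part of the
# original module). It assumes network access to basketball-reference.com and
# that the scraped game log contains a 'PTS' column; the player name and
# rolling window are arbitrary.
def _example_player_usage(name='Tim Duncan'):
    """
    Minimal sketch: fetch a player's game log, then attach a derived
    statistic with `apply_statistic` (a 10-game rolling scoring average).
    """
    player = Player(name)
    player.update(force=True)
    player.apply_statistic(
        'PTS_rolling10',
        lambda gamelog: pd.rolling_mean(gamelog['PTS'], 10))
    return player.gamelog[['PTS', 'PTS_rolling10']].tail()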
if __name__ == '__main__':
t = Player('Lebron James')
from IPython import embed
embed()
print 'Done'
| mit |
elkingtonmcb/scikit-learn | sklearn/svm/classes.py | 126 | 40114 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
multi_class: string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
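# Editor's note: a minimal LinearSVC usage sketch added for illustration (it
# is not part of the scikit-learn source); it mirrors the SVC docstring
# example further below.
def _example_linear_svc():
    X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    y = np.array([1, 1, 2, 2])
    clf = LinearSVC(C=1.0).fit(X, y)
    # The query point lies near the first group, so class 1 is expected.
    return clf.predict([[-0.8, -1]])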
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the standard
        SVR (L1-type) loss, while 'squared_epsilon_insensitive' is the
        squared epsilon-insensitive (L2-type) loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
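# Editor's note: a minimal LinearSVR usage sketch added for illustration (not
# part of the scikit-learn source); the synthetic data below is arbitrary.
def _example_linear_svr():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = X[:, 0] + 0.1 * rng.randn(20)
    reg = LinearSVR(C=1.0, epsilon=0.0).fit(X, y)
    return reg.predict(X[:2])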
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
        The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
        The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
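# Editor's note: a minimal OneClassSVM usage sketch added for illustration
# (not part of the scikit-learn source); the training cloud and test points
# below are arbitrary.
def _example_one_class_svm():
    rng = np.random.RandomState(0)
    X_train = 0.3 * rng.randn(100, 2)
    X_test = np.array([[0.1, 0.1], [4.0, 4.0]])
    clf = OneClassSVM(nu=0.1, kernel='rbf', gamma=0.1).fit(X_train)
    # predict() returns +1 for inliers and -1 for outliers.
    return clf.predict(X_test)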
| bsd-3-clause |
ybroze/trading-with-python | lib/yahooFinance.py | 76 | 8290 | # -*- coding: utf-8 -*-
# Author: Jev Kuznetsov <[email protected]>
# License: BSD
"""
Toolset working with yahoo finance data
This module includes functions for easy access to YahooFinance data
Functions
----------
- `getHistoricData` get historic data for a single symbol
- `getQuote` get current quote for a symbol
- `getScreenerSymbols` load symbols from a yahoo stock screener file
Classes
---------
- `HistData` a class for working with multiple symbols
"""
from datetime import datetime, date
import urllib2
from pandas import DataFrame, Index, HDFStore, WidePanel
import numpy as np
import os
from extra import ProgressBar
def parseStr(s):
    ''' convert a string to a float where possible, otherwise return the cleaned string '''
f = s.strip()
if f[0] == '"':
return f.strip('"')
elif f=='N/A':
return np.nan
else:
try: # try float conversion
prefixes = {'M':1e6, 'B': 1e9}
prefix = f[-1]
if prefix in prefixes: # do we have a Billion/Million character?
return float(f[:-1])*prefixes[prefix]
else: # no, convert to float directly
return float(f)
except ValueError: # failed, return original string
return s
class HistData(object):
''' a class for working with yahoo finance data '''
def __init__(self, autoAdjust=True):
self.startDate = (2008,1,1)
self.autoAdjust=autoAdjust
self.wp = WidePanel()
def load(self,dataFile):
"""load data from HDF"""
if os.path.exists(dataFile):
store = HDFStore(dataFile)
symbols = [str(s).strip('/') for s in store.keys() ]
data = dict(zip(symbols,[store[symbol] for symbol in symbols]))
self.wp = WidePanel(data)
store.close()
else:
raise IOError('Data file does not exist')
def save(self,dataFile):
""" save data to HDF"""
print 'Saving data to', dataFile
store = HDFStore(dataFile)
for symbol in self.wp.items:
store[symbol] = self.wp[symbol]
store.close()
def downloadData(self,symbols='all'):
''' get data from yahoo '''
if symbols == 'all':
symbols = self.symbols
#store = HDFStore(self.dataFile)
p = ProgressBar(len(symbols))
for idx,symbol in enumerate(symbols):
try:
df = getSymbolData(symbol,sDate=self.startDate,verbose=False)
if self.autoAdjust:
df = _adjust(df,removeOrig=True)
if len(self.symbols)==0:
self.wp = WidePanel({symbol:df})
else:
self.wp[symbol] = df
except Exception,e:
print e
p.animate(idx+1)
def getDataFrame(self,field='close'):
''' return a slice on wide panel for a given field '''
return self.wp.minor_xs(field)
@property
def symbols(self):
return self.wp.items.tolist()
def __repr__(self):
return str(self.wp)
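# Editor's note: illustrative HistData workflow (added for clarity). The
# symbols, file name and network access are assumptions, and the historical
# Yahoo endpoint this module relies on may no longer be available.
def _example_hist_data(dataFile='histData.h5'):
    h = HistData()                      # autoAdjust=True by default
    h.downloadData(['SPY', 'QQQ'])      # fetch and adjust each symbol
    h.save(dataFile)                    # persist to HDF5
    close = h.getDataFrame(field='close')   # DataFrame of adjusted closes
    return close.tail()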
def getQuote(symbols):
''' get current yahoo quote, return a DataFrame '''
# for codes see: http://www.gummy-stuff.org/Yahoo-data.htm
if not isinstance(symbols,list):
symbols = [symbols]
header = ['symbol','last','change_pct','PE','time','short_ratio','prev_close','eps','market_cap']
request = str.join('', ['s', 'l1', 'p2' , 'r', 't1', 's7', 'p', 'e' , 'j1'])
data = dict(zip(header,[[] for i in range(len(header))]))
urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (str.join('+',symbols), request)
try:
lines = urllib2.urlopen(urlStr).readlines()
except Exception, e:
s = "Failed to download:\n{0}".format(e);
print s
for line in lines:
fields = line.strip().split(',')
#print fields, len(fields)
for i,field in enumerate(fields):
data[header[i]].append( parseStr(field))
idx = data.pop('symbol')
return DataFrame(data,index=idx)
def _historicDataUrl(symbol, sDate=(1990,1,1), eDate=date.today().timetuple()[0:3]):
"""
generate url
    symbol: Yahoo Finance symbol
sDate: start date (y,m,d)
eDate: end date (y,m,d)
"""
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
return urlStr
def getHistoricData(symbols, **options):
'''
get data from Yahoo finance and return pandas dataframe
    Returns an OHLCV data frame if a single symbol is provided.
    If multiple symbols are provided, it returns a WidePanel.
Parameters
------------
    symbols: Yahoo Finance symbol or a list of symbols
sDate: start date (y,m,d)
eDate: end date (y,m,d)
adjust : T/[F] adjust data based on adj_close
'''
assert isinstance(symbols,(list,str)), 'Input must be a string symbol or a list of symbols'
if isinstance(symbols,str):
return getSymbolData(symbols,**options)
else:
data = {}
print 'Downloading data:'
p = ProgressBar(len(symbols))
for idx,symbol in enumerate(symbols):
p.animate(idx+1)
data[symbol] = getSymbolData(symbol,verbose=False,**options)
return WidePanel(data)
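# Editor's note: illustrative calls for `getHistoricData` (added for clarity;
# the symbols and dates are arbitrary, and the ichart.finance.yahoo.com
# endpoint this module relies on has since been retired, so these calls may
# no longer succeed).
def _example_get_historic_data():
    single = getHistoricData('SPY', sDate=(2010, 1, 1), adjust=True)   # OHLCV DataFrame
    many = getHistoricData(['SPY', 'QQQ'], sDate=(2010, 1, 1))         # WidePanel
    return single.tail(), many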
def getSymbolData(symbol, sDate=(1990,1,1),eDate=date.today().timetuple()[0:3], adjust=False, verbose=True):
"""
get data from Yahoo finance and return pandas dataframe
    symbol: Yahoo Finance symbol
sDate: start date (y,m,d)
eDate: end date (y,m,d)
"""
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
try:
lines = urllib2.urlopen(urlStr).readlines()
except Exception, e:
s = "Failed to download:\n{0}".format(e);
print s
return None
dates = []
data = [[] for i in range(6)]
#high
# header : Date,Open,High,Low,Close,Volume,Adj Close
for line in lines[1:]:
#print line
fields = line.rstrip().split(',')
dates.append(datetime.strptime( fields[0],'%Y-%m-%d'))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
idx = Index(dates)
data = dict(zip(['open','high','low','close','volume','adj_close'],data))
# create a pandas dataframe structure
df = DataFrame(data,index=idx).sort()
if verbose:
print 'Got %i days of data' % len(df)
if adjust:
return _adjust(df,removeOrig=True)
else:
return df
def _adjust(df, removeOrig=False):
'''
    Adjust historic data based on the adj_close field
'''
c = df['close']/df['adj_close']
df['adj_open'] = df['open']/c
df['adj_high'] = df['high']/c
df['adj_low'] = df['low']/c
if removeOrig:
df=df.drop(['open','close','high','low'],axis=1)
renames = dict(zip(['adj_open','adj_close','adj_high','adj_low'],['open','close','high','low']))
df=df.rename(columns=renames)
return df
def getScreenerSymbols(fileName):
''' read symbols from a .csv saved by yahoo stock screener '''
with open(fileName,'r') as fid:
lines = fid.readlines()
symbols = []
for line in lines[3:]:
fields = line.strip().split(',')
field = fields[0].strip()
if len(field) > 0:
symbols.append(field)
return symbols
| bsd-3-clause |
hainm/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 126 | 13591 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and
test sets. The compressed dataset size is around 14 MB. Once
uncompressed, the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:').
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
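# Hedged illustration (added for clarity, not part of the upstream module):
# the three strip_newsgroup_* helpers are meant to be chained on a raw post.
# The sample text below is an assumption used only to show the effect of
# each step.
#
#   _sample = ("From: [email protected]\nSubject: hi\n\n"
#              "someone writes:\n> quoted line\nactual reply\n--\nsig")
#   _body = strip_newsgroup_header(_sample)   # drops everything before '\n\n'
#   _body = strip_newsgroup_quoting(_body)    # drops '>' and "writes:" lines
#   _body = strip_newsgroup_footer(_body)     # drops the trailing signature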
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
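# Hedged usage sketch (not part of the upstream file; the category names are
# assumptions taken from the standard 20 newsgroups label set):
#
#   news = fetch_20newsgroups(subset='train',
#                             categories=['sci.space', 'rec.autos'],
#                             remove=('headers', 'footers', 'quotes'))
#   print(len(news.data), news.target_names)   # raw posts and their labels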
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
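# Hedged usage sketch (not part of the upstream file). Note that, despite the
# docstring above, the implementation returns L2-normalized term-count
# matrices (CountVectorizer + normalize) rather than a full tf-idf transform.
#
#   bunch = fetch_20newsgroups_vectorized(subset='train')
#   X, y = bunch.data, bunch.target            # sparse CSR matrix, label array
#   print(X.shape, len(bunch.target_names))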
| bsd-3-clause |
mbrucher/AudioTK | tests/Python/Distortion/PyATKDistortion_diode_clipper_test.py | 1 | 6956 | #!/usr/bin/env python
from ATK.Core import DoubleInPointerFilter, DoubleOutPointerFilter
from ATK.Tools import DoubleOversampling6points5order_32Filter, DoubleOversampling6points5order_16Filter, DoubleOversampling6points5order_8Filter, DoubleOversampling6points5order_4Filter, DoubleDecimationFilter
from ATK.EQ import DoubleButterworthLowPassFilter
from ATK.Distortion import DoubleDiodeClipperFilter
def filter_32(input):
import numpy as np
output = np.zeros(input.shape, dtype=np.float64)
infilter = DoubleInPointerFilter(input, False)
infilter.input_sampling_rate = 48000
overfilter = DoubleOversampling6points5order_32Filter()
overfilter.input_sampling_rate = 48000
overfilter.output_sampling_rate = 48000 * 32
overfilter.set_input_port(0, infilter, 0)
overdrivefilter = DoubleDiodeClipperFilter()
overdrivefilter.input_sampling_rate = 48000 * 32
overdrivefilter.set_input_port(0, overfilter, 0)
lowpassfilter = DoubleButterworthLowPassFilter()
lowpassfilter.input_sampling_rate = 48000 * 32
lowpassfilter.cut_frequency = 48000
lowpassfilter.order = 5
lowpassfilter.set_input_port(0, overdrivefilter, 0)
decimationfilter = DoubleDecimationFilter(1)
decimationfilter.input_sampling_rate = 48000 * 32
decimationfilter.output_sampling_rate = 48000
decimationfilter.set_input_port(0, lowpassfilter, 0)
outfilter = DoubleOutPointerFilter(output, False)
outfilter.input_sampling_rate = 48000
outfilter.set_input_port(0, decimationfilter, 0)
outfilter.process(input.shape[1])
return output
def filter_16(input):
import numpy as np
output = np.zeros(input.shape, dtype=np.float64)
infilter = DoubleInPointerFilter(input, False)
infilter.input_sampling_rate = 48000
overfilter = DoubleOversampling6points5order_16Filter()
overfilter.input_sampling_rate = 48000
overfilter.output_sampling_rate = 48000 * 16
overfilter.set_input_port(0, infilter, 0)
overdrivefilter = DoubleDiodeClipperFilter()
overdrivefilter.input_sampling_rate = 48000 * 16
overdrivefilter.set_input_port(0, overfilter, 0)
lowpassfilter = DoubleButterworthLowPassFilter()
lowpassfilter.input_sampling_rate = 48000 * 16
lowpassfilter.cut_frequency = 48000
lowpassfilter.order = 5
lowpassfilter.set_input_port(0, overdrivefilter, 0)
decimationfilter = DoubleDecimationFilter(1)
decimationfilter.input_sampling_rate = 48000 * 16
decimationfilter.output_sampling_rate = 48000
decimationfilter.set_input_port(0, lowpassfilter, 0)
outfilter = DoubleOutPointerFilter(output, False)
outfilter.input_sampling_rate = 48000
outfilter.set_input_port(0, decimationfilter, 0)
outfilter.process(input.shape[1])
return output
def filter_8(input):
import numpy as np
output = np.zeros(input.shape, dtype=np.float64)
infilter = DoubleInPointerFilter(input, False)
infilter.input_sampling_rate = 48000
overfilter = DoubleOversampling6points5order_8Filter()
overfilter.input_sampling_rate = 48000
overfilter.output_sampling_rate = 48000 * 8
overfilter.set_input_port(0, infilter, 0)
overdrivefilter = DoubleDiodeClipperFilter()
overdrivefilter.input_sampling_rate = 48000 * 8
overdrivefilter.set_input_port(0, overfilter, 0)
lowpassfilter = DoubleButterworthLowPassFilter()
lowpassfilter.input_sampling_rate = 48000 * 8
lowpassfilter.cut_frequency = 48000
lowpassfilter.order = 5
lowpassfilter.set_input_port(0, overdrivefilter, 0)
decimationfilter = DoubleDecimationFilter(1)
decimationfilter.input_sampling_rate = 48000 * 8
decimationfilter.output_sampling_rate = 48000
decimationfilter.set_input_port(0, lowpassfilter, 0)
outfilter = DoubleOutPointerFilter(output, False)
outfilter.input_sampling_rate = 48000
outfilter.set_input_port(0, decimationfilter, 0)
outfilter.process(input.shape[1])
return output
def filter_4(input):
import numpy as np
output = np.zeros(input.shape, dtype=np.float64)
infilter = DoubleInPointerFilter(input, False)
infilter.input_sampling_rate = 48000
overfilter = DoubleOversampling6points5order_4Filter()
overfilter.input_sampling_rate = 48000
overfilter.output_sampling_rate = 48000 * 4
overfilter.set_input_port(0, infilter, 0)
overdrivefilter = DoubleDiodeClipperFilter()
overdrivefilter.input_sampling_rate = 48000 * 4
overdrivefilter.set_input_port(0, overfilter, 0)
lowpassfilter = DoubleButterworthLowPassFilter()
lowpassfilter.input_sampling_rate = 48000 * 4
lowpassfilter.cut_frequency = 48000
lowpassfilter.order = 5
lowpassfilter.set_input_port(0, overdrivefilter, 0)
decimationfilter = DoubleDecimationFilter(1)
decimationfilter.input_sampling_rate = 48000 * 4
decimationfilter.output_sampling_rate = 48000
decimationfilter.set_input_port(0, lowpassfilter, 0)
outfilter = DoubleOutPointerFilter(output, False)
outfilter.input_sampling_rate = 48000
outfilter.set_input_port(0, decimationfilter, 0)
outfilter.process(input.shape[1])
return output
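# Hedged refactoring sketch (an editorial assumption, not part of the original
# test): filter_32/16/8/4 above differ only in the oversampling filter class
# and the oversampling factor, so the same pipeline can be built once by a
# small factory that mirrors those functions.
def make_diode_clipper_filter(oversampling_class, factor, base_rate=48000):
    def process(input):
        import numpy as np
        output = np.zeros(input.shape, dtype=np.float64)
        # source -> oversampler -> diode clipper -> low-pass -> decimator -> sink
        infilter = DoubleInPointerFilter(input, False)
        infilter.input_sampling_rate = base_rate
        overfilter = oversampling_class()
        overfilter.input_sampling_rate = base_rate
        overfilter.output_sampling_rate = base_rate * factor
        overfilter.set_input_port(0, infilter, 0)
        overdrivefilter = DoubleDiodeClipperFilter()
        overdrivefilter.input_sampling_rate = base_rate * factor
        overdrivefilter.set_input_port(0, overfilter, 0)
        lowpassfilter = DoubleButterworthLowPassFilter()
        lowpassfilter.input_sampling_rate = base_rate * factor
        lowpassfilter.cut_frequency = base_rate
        lowpassfilter.order = 5
        lowpassfilter.set_input_port(0, overdrivefilter, 0)
        decimationfilter = DoubleDecimationFilter(1)
        decimationfilter.input_sampling_rate = base_rate * factor
        decimationfilter.output_sampling_rate = base_rate
        decimationfilter.set_input_port(0, lowpassfilter, 0)
        outfilter = DoubleOutPointerFilter(output, False)
        outfilter.input_sampling_rate = base_rate
        outfilter.set_input_port(0, decimationfilter, 0)
        outfilter.process(input.shape[1])
        return output
    return process
# e.g. an equivalent of filter_32:
# filter_32_alt = make_diode_clipper_filter(DoubleOversampling6points5order_32Filter, 32)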
def dc_32_test():
import numpy as np
from numpy.testing import assert_almost_equal
import os
dirname = os.path.dirname(__file__)
x = np.fromfile(dirname + os.sep + "input_dc.dat", dtype=np.float64).reshape(1, -1)
ref = np.fromfile(dirname + os.sep + "output32_dc.dat", dtype=np.float64).reshape(1, -1)
out = filter_32(x)
assert_almost_equal(out, ref)
def dc_16_test():
import numpy as np
from numpy.testing import assert_almost_equal
import os
dirname = os.path.dirname(__file__)
x = np.fromfile(dirname + os.sep + "input_dc.dat", dtype=np.float64).reshape(1, -1)
ref = np.fromfile(dirname + os.sep + "output16_dc.dat", dtype=np.float64).reshape(1, -1)
out = filter_16(x)
assert_almost_equal(out, ref)
def dc_8_test():
import numpy as np
from numpy.testing import assert_almost_equal
import os
dirname = os.path.dirname(__file__)
x = np.fromfile(dirname + os.sep + "input_dc.dat", dtype=np.float64).reshape(1, -1)
ref = np.fromfile(dirname + os.sep + "output8_dc.dat", dtype=np.float64).reshape(1, -1)
out = filter_8(x)
assert_almost_equal(out, ref)
def dc_4_test():
import numpy as np
from numpy.testing import assert_almost_equal
import os
dirname = os.path.dirname(__file__)
x = np.fromfile(dirname + os.sep + "input_dc.dat", dtype=np.float64).reshape(1, -1)
ref = np.fromfile(dirname + os.sep + "output4_dc.dat", dtype=np.float64).reshape(1, -1)
out = filter_4(x)
assert_almost_equal(out, ref)
if __name__ == "__main__":
import numpy as np
size = 1200
x = np.arange(size).reshape(1, -1) / 48000.
d = np.sin(x * 2 * np.pi * 100)
d.tofile("input_dc.dat")
out = filter_32(d)
out.tofile("output32_dc.dat")
out = filter_16(d)
out.tofile("output16_dc.dat")
out = filter_8(d)
out.tofile("output8_dc.dat")
out = filter_4(d)
out.tofile("output4_dc.dat")
import matplotlib.pyplot as plt
plt.plot(x[0], d[0], label="input")
plt.plot(x[0], out[0], label="output")
plt.legend()
plt.show()
| bsd-3-clause |
plissonf/scikit-learn | sklearn/linear_model/setup.py | 146 | 1713 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linear_model', parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension('cd_fast', sources=['cd_fast.c'],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]), **blas_info)
config.add_extension('sgd_fast',
sources=['sgd_fast.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
config.add_extension('sag_fast',
sources=['sag_fast.c'],
include_dirs=numpy.get_include())
# add other directories
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
thp44/delphin_6_automation | data_process/validation/test/result_compar.py | 1 | 3046 | __author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import numpy as np
import matplotlib.pyplot as plt
import os
# RiBuild Modules
from delphin_6_automation.file_parsing import delphin_parser
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
#folder = r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\validation\test\Hans\Ms-11-5-DWD Weimar\results'
#folder_dif = r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\validation\test\Hans\Ms-11-5-DWD Weimar - Weather\results'
#folder = r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\validation\test\Hans\5cab2aa329a08e2e20fe4c6f - Dir\5cab2aa329a08e2e20fe4c6f\results'
#folder = r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\validation\test\Hans\5cab2aa329a08e2e20fe4c6f - BCO\5cab2aa329a08e2e20fe4c6f\results'
#folder_dif = r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\validation\test\Hans\Ms-11-5-DWD Weimar - Weather\results'
#folder = r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\validation\test\Error\Probabilistic\results'
folder = r'C:\ribuild\5cab2aa229a08e2e20fe4c6e\5cb5c00529a08e379485f1e9\results'
folder_dif = r'C:\ribuild\5cab2aa229a08e2e20fe4c6e\5cb5c00529a08e379485f1e9\results'
#folder_dif = r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\validation\test\Error\Determanistic\results'
temp_mould = 'temperature mould.d6o'
rh_mould = 'relative humidity mould.d6o'
heat_loss = 'heat loss.d6o'
temp = np.array(delphin_parser.d6o_to_dict(folder, temp_mould)[0])
temp_dif = np.array(delphin_parser.d6o_to_dict(folder_dif, temp_mould)[0])
rh = np.array(delphin_parser.d6o_to_dict(folder, rh_mould)[0])
rh_dif = np.array(delphin_parser.d6o_to_dict(folder_dif, rh_mould)[0])
heat = np.array(delphin_parser.d6o_to_dict(folder, heat_loss)[0])
heat_dif = np.array(delphin_parser.d6o_to_dict(folder_dif, heat_loss)[0])
x = np.arange(0, len(heat))
plt.figure()
plt.title('Temperature - Interface')
plt.plot(x, temp, label='Sun: Dir + Dif')
plt.plot(x, temp_dif, label='Sun: Dif + Dif')
plt.legend()
plt.figure()
plt.title('Relative Humidity - Interface')
plt.plot(x, rh, label='Sun: Dir + Dif')
plt.plot(x, rh_dif, label='Sun: Dif + Dif')
plt.legend()
plt.figure()
plt.title('Heat Loss')
plt.plot(x, heat, label='Sun: Dir + Dif')
plt.plot(x, heat_dif, label='Sun: Dif + Dif')
plt.legend()
plt.figure()
plt.title('Temperature - Difference')
plt.plot(x, temp - temp_dif)
plt.figure()
plt.title('Relative Humidity - Difference')
plt.plot(x, rh - rh_dif)
plt.figure()
plt.title('Heat Loss - Difference')
plt.plot(x, heat - heat_dif)
plt.figure()
plt.title('Heat Loss - Cumulated')
plt.plot(x, np.cumsum(heat), label='Sun: Dir + Dif')
plt.plot(x, np.cumsum(heat_dif), label='Sun: Dif + Dif')
plt.legend()
plt.show() | mit |
manashmndl/scikit-learn | sklearn/feature_selection/tests/test_base.py | 170 | 3666 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform(feature_names)
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform(feature_names_t)
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
Selameab/opencog | opencog/python/spatiotemporal/temporal_events/composition/emperical_distribution.py | 34 | 6615 | import csv
import numpy
from spatiotemporal.temporal_events.relation_formulas import TemporalRelation
from spatiotemporal.temporal_events.trapezium import TemporalEventTrapezium, generate_random_events
from spatiotemporal.time_intervals import TimeInterval
__author__ = 'keyvan'
def trim_float(float_object, no_digits=12):
return int(float_object * 10 ** no_digits) / float(10 ** no_digits)
def overlaps(bounds_1, bounds_2):
a_1, b_1 = bounds_1
a_2, b_2 = bounds_2
a_1, b_1, a_2, b_2 = trim_float(a_1), trim_float(b_1), trim_float(a_2), trim_float(b_2)
return a_1 < a_2 < b_1 or a_1 < b_2 < b_1 or a_2 < a_1 < b_2 or a_2 < b_1 < b_2 or a_1 == a_2 or b_1 == b_2
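# Hedged illustration (added for clarity; the numbers are arbitrary):
# `overlaps` reports whether two (start, end) bounds share any part, e.g.
#
#   overlaps((0.0, 2.0), (1.5, 3.0))   # -> True, partial overlap
#   overlaps((0.0, 1.0), (2.0, 3.0))   # -> False, disjoint intervals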
def generate_random_relations(size=1000):
relations = []
A = TemporalEventTrapezium(1000, 1008, 1002, 1004)
B_as = TimeInterval(991, 1008, size)
for B_a in B_as:
B = TemporalEventTrapezium(B_a, B_a + 9, B_a + 3, B_a + 8)
relations.append((A * B).to_list())
return relations
def generate_random_relations_file(size=20):
from datetime import datetime
from spatiotemporal.time_intervals import TimeInterval
csv_writer = csv.writer(open('relations.csv~', 'w'))
year_2010 = TimeInterval(datetime(2010, 1, 1), datetime(2011, 1, 1))
i = size
specifications = [None, None]
while i >= 0:
for j in xrange(2):
a = year_2010.random_time()
beg = year_2010.random_time(start=a)
end = year_2010.random_time(start=beg) #(start=a)
b = year_2010.random_time(start=end) #(start=max(end, beg))
specifications[j] = (a, b, beg, end)
a_beg, a_end = (specifications[0][0], specifications[0][2]), (specifications[0][3], specifications[0][1])
b_beg, b_end = (specifications[1][0], specifications[1][2]), (specifications[1][3], specifications[1][1])
valid = False
for bounds_1, bounds_2 in [
(a_beg, b_beg), (a_beg, b_end), (a_end, b_beg), (a_end, b_end)
]:
if overlaps(bounds_1, bounds_2):
valid = True
break
if not valid:
continue
event_1, event_2 = TemporalEventTrapezium(*specifications[0]), TemporalEventTrapezium(*specifications[1])
csv_writer.writerow((event_1 * event_2).to_list())
percentage = (size - i + 1) / float(size) * 100
if (size - i + 1) % 10**3 == 0:
print '%' + str(int(percentage))
i -= 1
def read_data(size=1000):
csv_reader = csv.reader(open('relations.csv~', 'r'))
relations = []
i = size
ps, ms, os = [], [], []
for row in csv_reader:
p, m, o = row[0:3]
p, m, o = float(p), float(m), float(o)
if i < 0:
break
ps.append(p)
ms.append(m)
os.append(o)
i -= 1
from matplotlib import pylab as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(ps, ms, os)
ax.set_xlabel('p')
ax.set_ylabel('m')
ax.set_zlabel('o')
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.set_zlim(0, 1)
plt.show()
def classify(size=10000, iterable=None):
csv_reader = iterable
if iterable is None:
csv_reader = csv.reader(open('relations.csv~', 'r'))
classes = {}
for i, row in enumerate(csv_reader):
if i > size - 1:
print 'number of classes:', len(classes)
for class_type in classes:
print classes[class_type][0].type, len(classes[class_type])
return classes
relation = TemporalRelation.from_list(row)
if relation.type not in classes:
classes[relation.type] = [relation]
else:
classes[relation.type].append(relation)
print 'number of classes:', len(classes)
for class_type in classes:
print classes[class_type][0].type, len(classes[class_type])
return classes
def learn(size=10000):
classes = classify(size)
relations = classes['DSOMP']
size = len(relations)
train_size = size - size / 4
train_data = relations[0:train_size]
test_data = relations[train_size:]
train_x, train_y = [], []
for relation in train_data:
train_x.append([relation['O'], relation['M']])
train_y.append(relation['P'])
train_x = numpy.array(train_x)
train_y = numpy.array(train_y).T
from sklearn.linear_model import Lasso, Ridge, ElasticNet, LinearRegression, LassoLars, BayesianRidge, ElasticNetCV, SGDRegressor
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from random import randrange
clf = KNeighborsRegressor(8)#alpha=0.000001)
clf.fit(train_x, train_y)
test_x, test_y = [], []
for relation in test_data:
test_x.append([relation['O'], relation['M']])
test_y.append(relation['P'])
print '\n', '///////// tests ////////'
for i in xrange(5):
print 'learning', clf.predict(train_x[i])
print 'actual', train_y[i], '\n-------------\n'
print '***************************'
for i in xrange(5):
print 'learning', clf.predict(test_x[i])
print 'actual', test_y[i], '\n-------------\n'
def learn_all(size=10000):
relations = read_data(size)
size = len(relations)
train_size = size - size / 4
train_data = relations[0:train_size]
test_data = relations[train_size:]
train_x, train_y = [], []
for relation in train_data:
train_x.append(numpy.array([relation['F']]))
train_y.append(numpy.array([relation['o']]))
train_x = numpy.array(train_x)
train_y = numpy.array(train_y)
from sklearn.linear_model import Lasso, Ridge, ElasticNet, LinearRegression, LassoLars, BayesianRidge, ElasticNetCV, SGDRegressor
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from random import randrange
clf = KNeighborsRegressor()#alpha=0.000001)
clf.fit(train_x, train_y)
test_x, test_y = [], []
for relation in test_data:
test_x.append(numpy.array([relation['F']]))
test_y.append(numpy.array([relation['o']]))
print '\n', '///////// tests ////////'
for i in xrange(5):
print 'F:', train_x[i]
print 'learning', clf.predict(train_x[i])
print 'actual', train_y[i], '\n-------------\n'
print '***************************'
for i in xrange(5):
print 'F:', test_x[i]
print 'learning', clf.predict(test_x[i])
print 'actual', test_y[i], '\n-------------\n'
| agpl-3.0 |
wonkoderverstaendige/telemetry | gather.py | 1 | 3510 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
import socket
from util.misc import get_local_config, iso_date, iso_week
LOCAL_CONFIG = get_local_config()
SENSOR_HOST = LOCAL_CONFIG['host']['node']
LOCAL_HOST_NAME = socket.gethostname()
LOCAL_NODE_NAME = LOCAL_CONFIG['host']['node']
assert LOCAL_HOST_NAME == LOCAL_CONFIG['host']['name']
LOCAL_DB_PATH = LOCAL_CONFIG['paths']['db']
DTYPE_VAL = np.float32
DTYPE_CAT = np.uint8
nodes = {'chuck': 0, 'bed': 1}
sensors = {0: {'temp': 1, 'light': 2, 'soil':3, 'dummy': 4, 'ds_temp':5},
1: {'strain': 6, 'temp': 7, 'motion': 8}}
# needed because of the naming overlap in the sensors. [b/c]x are placeholders for debugging.
cats = {0: ['c0', 'temp', 'light', 'soil', 'c4', 'ds_temp', 'c6', 'c7'],
1: ['b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'strain', 'temp', 'motion']}
def nid_from_filename(file_path):
return nodes[os.path.basename(file_path).split('-')[-1].split('.')[0]]
def load_node_csv(file_path, nid=None):
nid = nid if nid is not None else nid_from_filename(file_path)
data = pd.read_csv(file_path, names=['timestamp', 'sensor_str', 'value'], skiprows=1,
dtype={'timestamp': np.float64, 'sensor_str': str, 'value': DTYPE_VAL})
data.set_index(data.timestamp.multiply(1e9).astype('datetime64[ns]'), inplace=True)
sid = data.sensor_str.astype('category', categories=cats[nid_from_filename(file_path)]).cat.codes.astype(DTYPE_CAT)
return pd.DataFrame({'sid': sid, 'nid': np.array(nid, dtype=np.uint8), 'value': data.value})
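# Hedged note (inferred from the parser above, not from a separate spec):
# each weekly CSV is expected to hold one header row followed by rows of
# "<unix timestamp>,<sensor name>,<value>", e.g. "1429012345.1,temp,21.5".
# A typical call (the file name is an assumption):
#
#   df = load_node_csv(os.path.join(LOCAL_DB_PATH, '2015_w20-chuck.csv'))
#   df[['nid', 'sid', 'value']].head()   # datetime-indexed, compact dtypes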
def load_hdf(hdf_file_path):
return pd.read_hdf(hdf_file_path, 'data')
def last_samples_hdf(df):
df.sort_index(inplace=True)
assert df.index.is_monotonic
g = df.groupby('nid')
return {k: g.get_group(k).index[-1] for k in [0, 1]}
if __name__ == "__main__":
OFFSET = 2
sources = [{'host': 'VersedSquid', 'node': 'chuck', 'src': '~/code/telemetry/local/db/',
'iso': iso_date(), 'dst': LOCAL_DB_PATH},
{'host': 'RandyDolphin', 'node': 'bed', 'src': '~/code/telemetry/local/db/',
'iso': iso_date(), 'dst': LOCAL_DB_PATH}]
if iso_week(offset=0) != iso_week(offset=OFFSET):
import copy
print "Updating last weeks data, too"
sources.extend(copy.deepcopy(sources))
for n in range(len(sources) / 2):
sources[n]['iso'] = iso_date(offset=OFFSET)
for src in sources:
if src['host'] == LOCAL_HOST_NAME:
print "Skipping localhost"
continue
os.system("rsync -avh {host}:{src}{iso[0]:}_w{iso[1]:02}-{node}.csv {dst}".format(**src))
last = last_samples_hdf(load_hdf(os.path.join(LOCAL_DB_PATH, 'telemetry.h5')))
node_data = [load_node_csv(os.path.join(LOCAL_DB_PATH, '{iso[0]:}_w{iso[1]:02}-{node}.csv'.format(**src)))
for src in sources]
df = pd.concat([node_data[n][node_data[n].index > last[n]] for n in range(len(node_data))]).sort_index()
# FIXME: For some reason the values column is _sometimes_ cast to float64! The hell?
df.value = df.value.astype(np.float32)
try:
if df.shape[0]:
print df.shape[0], "new rows to be appended!"
with pd.HDFStore(os.path.join(LOCAL_DB_PATH, 'telemetry.h5')) as store:
store.append('data', df, append=True)
else:
print "Nothing new."
except ValueError, error:
print error
print df.dtypes
| mit |
lin-credible/scikit-learn | sklearn/covariance/robust_covariance.py | 198 | 29735 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates and random subsets before
pooling them into a larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
# The above is too big. Let's try with something much smaller
# (and less optimal): shrink the number of retained candidates
# before allocating the fallback array.
n_best_tot = 10
n_best_sub = 2
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
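# Usage sketch (illustrative addition, not part of the original module).  It
# exercises the raw / corrected / re-weighted attributes documented above on a
# toy contaminated dataset.  The import assumes this estimator is exposed as
# ``sklearn.covariance.MinCovDet``; adapt it if your build differs.
if __name__ == "__main__":
    import numpy as np
    from sklearn.covariance import MinCovDet
    rng = np.random.RandomState(0)
    X_demo = rng.randn(100, 3)
    X_demo[:5] += 10.0  # a handful of gross outliers
    mcd = MinCovDet(random_state=0).fit(X_demo)
    # location_ / covariance_ are the re-weighted robust estimates set in fit()
    print(mcd.location_)
    print(mcd.covariance_)
    # support_ is the boolean mask of observations kept by reweight_covariance()
    print("%d inliers out of %d" % (mcd.support_.sum(), X_demo.shape[0]))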
| bsd-3-clause |
nicococo/scRNA | setup.py | 1 | 1100 | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'Single-cell RNA-seq multitask clustering toolbox',
'url': 'https://github.com/nicococo/scRNA',
'author': 'Nico Goernitz, Bettina Mieth, Marina Vidovic, Alex Gutteridge',
'author_email': '[email protected]',
'version': '2019.08',
'install_requires': ['nose', 'scikit-learn','numpy', 'scipy', 'matplotlib', 'pandas'],
'packages': ['scRNA'],
'package_dir' : {'scRNA': 'scRNA'},
# 'package_data': {'scRNA': ['gene_names.txt']},
'scripts': ['bin/scRNA-source.sh', 'bin/scRNA-target.sh', 'bin/scRNA-generate-data.sh'],
'name': 'scRNA',
'classifiers': ['Intended Audience :: Science/Research',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 3']
}
setup(**config)
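# Installation note (illustrative addition, not part of the original file; the
# commands below are the usual setuptools workflow, not project documentation):
#   pip install .            # from the repository root
#   python setup.py install  # legacy equivalent
# Afterwards the helper scripts listed under 'scripts' should be on the PATH.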
| mit |
robovm/robovm-studio | python/helpers/pydev/pydevconsole.py | 41 | 15763 | from _pydev_imps._pydev_thread import start_new_thread
try:
from code import InteractiveConsole
except ImportError:
from pydevconsole_code_for_ironpython import InteractiveConsole
from code import compile_command
from code import InteractiveInterpreter
import os
import sys
import _pydev_threading as threading
import traceback
import fix_getpass
fix_getpass.fixGetpass()
import pydevd_vars
from pydev_imports import Exec, _queue
try:
import __builtin__
except:
import builtins as __builtin__
try:
False
True
except NameError: # version < 2.3 -- didn't have the True/False builtins
import __builtin__
setattr(__builtin__, 'True', 1) #Python 3.0 does not accept __builtin__.True = 1 in its syntax
setattr(__builtin__, 'False', 0)
from pydev_console_utils import BaseInterpreterInterface, BaseStdIn
from pydev_console_utils import CodeFragment
IS_PYTHON_3K = False
IS_PY24 = False
try:
if sys.version_info[0] == 3:
IS_PYTHON_3K = True
elif sys.version_info[0] == 2 and sys.version_info[1] == 4:
IS_PY24 = True
except:
#That's OK, not all versions of python have sys.version_info
pass
class Command:
def __init__(self, interpreter, code_fragment):
"""
:type code_fragment: CodeFragment
:type interpreter: InteractiveConsole
"""
self.interpreter = interpreter
self.code_fragment = code_fragment
self.more = None
def symbol_for_fragment(code_fragment):
if code_fragment.is_single_line:
symbol = 'single'
else:
symbol = 'exec' # Jython doesn't support this
return symbol
symbol_for_fragment = staticmethod(symbol_for_fragment)
def run(self):
text = self.code_fragment.text
symbol = self.symbol_for_fragment(self.code_fragment)
self.more = self.interpreter.runsource(text, '<input>', symbol)
try:
try:
execfile #Not in Py3k
except NameError:
from pydev_imports import execfile
__builtin__.execfile = execfile
except:
pass
# Pull in runfile, the interface to UMD that wraps execfile
from pydev_umd import runfile, _set_globals_function
try:
import builtins
builtins.runfile = runfile
except:
import __builtin__
__builtin__.runfile = runfile
#=======================================================================================================================
# InterpreterInterface
#=======================================================================================================================
class InterpreterInterface(BaseInterpreterInterface):
'''
The methods in this class should be registered in the xml-rpc server.
'''
def __init__(self, host, client_port, mainThread):
BaseInterpreterInterface.__init__(self, mainThread)
self.client_port = client_port
self.host = host
self.namespace = {}
self.interpreter = InteractiveConsole(self.namespace)
self._input_error_printed = False
def doAddExec(self, codeFragment):
command = Command(self.interpreter, codeFragment)
command.run()
return command.more
def getNamespace(self):
return self.namespace
def getCompletions(self, text, act_tok):
try:
from _pydev_completer import Completer
completer = Completer(self.namespace, None)
return completer.complete(act_tok)
except:
import traceback
traceback.print_exc()
return []
def close(self):
sys.exit(0)
def get_greeting_msg(self):
return 'PyDev console: starting.\n'
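# Local usage sketch (comments only; illustrative addition, not part of the
# original module).  The constructor arguments mirror get_interpreter() further
# down and the single-argument CodeFragment call mirrors consoleExec(); both
# are taken from this file rather than from any documented API.
#   iface = InterpreterInterface(None, None, threading.currentThread())
#   more = iface.doAddExec(CodeFragment('x = 40 + 2'))  # True => needs more input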
class _ProcessExecQueueHelper:
_debug_hook = None
_return_control_osc = False
def set_debug_hook(debug_hook):
_ProcessExecQueueHelper._debug_hook = debug_hook
def process_exec_queue(interpreter):
from pydev_ipython.inputhook import get_inputhook, set_return_control_callback
def return_control():
''' A function that the inputhooks can call (via inputhook.stdin_ready()) to find
out if they should cede control and return '''
if _ProcessExecQueueHelper._debug_hook:
# Some of the input hooks check return control without doing
# a single operation, so we don't return True on every
# call when the debug hook is in place to allow the GUI to run
# XXX: Eventually the inputhook code will have diverged enough
# from the IPython source that it will be worthwhile rewriting
# it rather than pretending to maintain the old API
_ProcessExecQueueHelper._return_control_osc = not _ProcessExecQueueHelper._return_control_osc
if _ProcessExecQueueHelper._return_control_osc:
return True
if not interpreter.exec_queue.empty():
return True
return False
set_return_control_callback(return_control)
from pydev_import_hook import import_hook_manager
from pydev_ipython.matplotlibtools import activate_matplotlib, activate_pylab, activate_pyplot
import_hook_manager.add_module_name("matplotlib", lambda: activate_matplotlib(interpreter.enableGui))
# enable_gui_function in activate_matplotlib should be called in main thread. That's why we call
# interpreter.enableGui which put it into the interpreter's exec_queue and executes it in the main thread.
import_hook_manager.add_module_name("pylab", activate_pylab)
import_hook_manager.add_module_name("pyplot", activate_pyplot)
while 1:
# Running the request may have changed the inputhook in use
inputhook = get_inputhook()
if _ProcessExecQueueHelper._debug_hook:
_ProcessExecQueueHelper._debug_hook()
if inputhook:
try:
# Note: it'll block here until return_control returns True.
inputhook()
except:
import traceback;traceback.print_exc()
try:
try:
code_fragment = interpreter.exec_queue.get(block=True, timeout=1/20.) # 20 calls/second
except _queue.Empty:
continue
if callable(code_fragment):
# It can be a callable (i.e.: something that must run in the main
# thread can be put in the queue for later execution).
code_fragment()
else:
more = interpreter.addExec(code_fragment)
except KeyboardInterrupt:
interpreter.buffer = None
continue
except SystemExit:
raise
except:
type, value, tb = sys.exc_info()
traceback.print_exception(type, value, tb, file=sys.__stderr__)
exit()
if 'IPYTHONENABLE' in os.environ:
IPYTHON = os.environ['IPYTHONENABLE'] == 'True'
else:
IPYTHON = True
try:
try:
exitfunc = sys.exitfunc
except AttributeError:
exitfunc = None
if IPYTHON:
from pydev_ipython_console import InterpreterInterface
if exitfunc is not None:
sys.exitfunc = exitfunc
else:
try:
delattr(sys, 'exitfunc')
except:
pass
except:
IPYTHON = False
pass
#=======================================================================================================================
# _DoExit
#=======================================================================================================================
def DoExit(*args):
'''
We have to override the exit because calling sys.exit will only actually exit the main thread,
and as we're in a Xml-rpc server, that won't work.
'''
try:
import java.lang.System
java.lang.System.exit(1)
except ImportError:
if len(args) == 1:
os._exit(args[0])
else:
os._exit(0)
def handshake():
return "PyCharm"
#=======================================================================================================================
# StartServer
#=======================================================================================================================
def start_server(host, port, interpreter):
if port == 0:
host = ''
#I.e.: supporting the internal Jython version in PyDev to create a Jython interactive console inside Eclipse.
from pydev_imports import SimpleXMLRPCServer as XMLRPCServer #@Reimport
try:
if IS_PY24:
server = XMLRPCServer((host, port), logRequests=False)
else:
server = XMLRPCServer((host, port), logRequests=False, allow_none=True)
except:
sys.stderr.write('Error starting server with host: %s, port: %s, client_port: %s\n' % (host, port, interpreter.client_port))
raise
# Tell UMD the proper default namespace
_set_globals_function(interpreter.getNamespace)
server.register_function(interpreter.execLine)
server.register_function(interpreter.execMultipleLines)
server.register_function(interpreter.getCompletions)
server.register_function(interpreter.getFrame)
server.register_function(interpreter.getVariable)
server.register_function(interpreter.changeVariable)
server.register_function(interpreter.getDescription)
server.register_function(interpreter.close)
server.register_function(interpreter.interrupt)
server.register_function(handshake)
server.register_function(interpreter.connectToDebugger)
server.register_function(interpreter.hello)
server.register_function(interpreter.getArray)
server.register_function(interpreter.evaluate)
# Functions for GUI main loop integration
server.register_function(interpreter.enableGui)
if port == 0:
(h, port) = server.socket.getsockname()
print(port)
print(interpreter.client_port)
sys.stderr.write(interpreter.get_greeting_msg())
sys.stderr.flush()
server.serve_forever()
return server
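# Client-side sketch (comments only; illustrative addition, not part of the
# original module).  Once start_server() is serving, the functions registered
# above are reachable over XML-RPC.  The method names come from the
# register_function calls; the argument convention for execLine is an
# assumption, not a documented contract.
#   import xmlrpclib                                  # xmlrpc.client on Python 3
#   proxy = xmlrpclib.ServerProxy('http://127.0.0.1:%d' % port)
#   print(proxy.handshake())                          # -> "PyCharm"
#   proxy.execLine('x = 40 + 2')                      # assumed: one source line per call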
def StartServer(host, port, client_port):
#replace exit (see comments on method)
#note that this does not work in jython!!! (sys method can't be replaced).
sys.exit = DoExit
interpreter = InterpreterInterface(host, client_port, threading.currentThread())
start_new_thread(start_server,(host, port, interpreter))
process_exec_queue(interpreter)
def get_interpreter():
try:
interpreterInterface = getattr(__builtin__, 'interpreter')
except AttributeError:
interpreterInterface = InterpreterInterface(None, None, threading.currentThread())
setattr(__builtin__, 'interpreter', interpreterInterface)
return interpreterInterface
def get_completions(text, token, globals, locals):
interpreterInterface = get_interpreter()
interpreterInterface.interpreter.update(globals, locals)
return interpreterInterface.getCompletions(text, token)
#===============================================================================
# Debugger integration
#===============================================================================
def exec_code(code, globals, locals):
interpreterInterface = get_interpreter()
interpreterInterface.interpreter.update(globals, locals)
res = interpreterInterface.needMore(code)
if res:
return True
interpreterInterface.addExec(code)
return False
class ConsoleWriter(InteractiveInterpreter):
skip = 0
def __init__(self, locals=None):
InteractiveInterpreter.__init__(self, locals)
def write(self, data):
#if (data.find("global_vars") == -1 and data.find("pydevd") == -1):
if self.skip > 0:
self.skip -= 1
else:
if data == "Traceback (most recent call last):\n":
self.skip = 1
sys.stderr.write(data)
def showsyntaxerror(self, filename=None):
"""Display the syntax error that just occurred."""
#Override for avoid using sys.excepthook PY-12600
type, value, tb = sys.exc_info()
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
if filename and type is SyntaxError:
# Work hard to stuff the correct filename in the exception
try:
msg, (dummy_filename, lineno, offset, line) = value.args
except ValueError:
# Not the format we expect; leave it alone
pass
else:
# Stuff in the right filename
value = SyntaxError(msg, (filename, lineno, offset, line))
sys.last_value = value
list = traceback.format_exception_only(type, value)
sys.stderr.write(''.join(list))
def showtraceback(self):
"""Display the exception that just occurred."""
#Override for avoid using sys.excepthook PY-12600
try:
type, value, tb = sys.exc_info()
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
tblist = traceback.extract_tb(tb)
del tblist[:1]
lines = traceback.format_list(tblist)
if lines:
lines.insert(0, "Traceback (most recent call last):\n")
lines.extend(traceback.format_exception_only(type, value))
finally:
tblist = tb = None
sys.stderr.write(''.join(lines))
def consoleExec(thread_id, frame_id, expression):
"""returns 'False' in case expression is partially correct
"""
frame = pydevd_vars.findFrame(thread_id, frame_id)
expression = str(expression.replace('@LINE@', '\n'))
#Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329
#(Names not resolved in generator expression in method)
#See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html
updated_globals = {}
updated_globals.update(frame.f_globals)
updated_globals.update(frame.f_locals) #locals later because it has precedence over the actual globals
if IPYTHON:
return exec_code(CodeFragment(expression), updated_globals, frame.f_locals)
interpreter = ConsoleWriter()
try:
code = compile_command(expression)
except (OverflowError, SyntaxError, ValueError):
# Case 1
interpreter.showsyntaxerror()
return False
if code is None:
# Case 2
return True
#Case 3
try:
Exec(code, updated_globals, frame.f_locals)
except SystemExit:
raise
except:
interpreter.showtraceback()
return False
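# Illustration of the three cases above (comments only; the return values
# assume the standard behaviour of code.compile_command, not anything specific
# to this module):
#   compile_command('1 +')         # raises SyntaxError              -> Case 1
#   compile_command('if True:')    # returns None, input incomplete  -> Case 2
#   compile_command('print(42)')   # returns a code object           -> Case 3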
#=======================================================================================================================
# main
#=======================================================================================================================
if __name__ == '__main__':
#Important: don't use this module directly as the __main__ module, rather, import itself as pydevconsole
#so that we don't get multiple pydevconsole modules if it's executed directly (otherwise we'd have multiple
#representations of its classes).
#See: https://sw-brainwy.rhcloud.com/tracker/PyDev/446:
#'Variables' and 'Expressions' views stopped working when debugging interactive console
import pydevconsole
sys.stdin = pydevconsole.BaseStdIn()
port, client_port = sys.argv[1:3]
import pydev_localhost
if int(port) == 0 and int(client_port) == 0:
(h, p) = pydev_localhost.get_socket_name()
client_port = p
pydevconsole.StartServer(pydev_localhost.get_localhost(), int(port), int(client_port))
| apache-2.0 |
justincassidy/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
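# Follow-up sketch (comments only; illustrative addition, not part of the
# original example): the staged deviance curve can also serve as a crude
# early-stopping signal.  test_deviance here still holds the values computed
# for the last setting in the loop above.
#   best_iter = int(np.argmin(test_deviance)) + 1
#   print('lowest test deviance at boosting iteration', best_iter)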
| bsd-3-clause |
DavidTingley/ephys-processing-pipeline | installation/klustaviewa-0.3.0/kwiklib/dataio/tests/test_klustersloader.py | 2 | 17397 | """Unit tests for loader module."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
from collections import Counter
import numpy as np
import numpy.random as rnd
import pandas as pd
import shutil
from nose.tools import with_setup
from kwiklib.dataio.tests.mock_data import (
nspikes, nclusters, nsamples, nchannels, fetdim, TEST_FOLDER,
setup, teardown)
from kwiklib.dataio import (KlustersLoader, read_clusters, save_clusters,
find_filename, find_indices, filename_to_triplet, triplet_to_filename,
read_cluster_info, save_cluster_info, read_group_info, save_group_info,
renumber_clusters, reorder, convert_to_clu, select, get_indices,
check_dtype, check_shape, get_array, load_text,
find_filename_or_new)
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
def test_find_filename():
dir = '/my/path/'
extension_requested = 'spk'
files = [
'blabla.aclu.1',
'blabla_test.aclu.1',
'blabla_test2.aclu.1',
'blabla_test3.aclu.3',
'blabla.spk.1',
'blabla_test.spk.1',
'blabla_test.spk.1',
]
spkfile = find_filename('/my/path/blabla.clu.1', extension_requested,
files=files, dir=dir)
assert spkfile == dir + 'blabla.spk.1'
spkfile = find_filename('/my/path/blabla_test.clu.1', extension_requested,
files=files, dir=dir)
assert spkfile == dir + 'blabla_test.spk.1'
spkfile = find_filename('/my/path/blabla_test2.clu.1', extension_requested,
files=files, dir=dir)
assert spkfile == dir + 'blabla_test.spk.1'
spkfile = find_filename('/my/path/blabla_test3.clu.1', extension_requested,
files=files, dir=dir)
assert spkfile == dir + 'blabla_test.spk.1'
spkfile = find_filename('/my/path/blabla_test3.clu.3', extension_requested,
files=files, dir=dir)
assert spkfile == None
def test_find_filename2():
dir = '/my/path/'
extension_requested = 'spk'
files = [
'blabla.aclu.2',
'blabla_test.aclu.2',
'blabla_test2.aclu.2',
'blabla_test3.aclu.3',
'blabla.spk.2',
'blabla_test.spk.2',
]
spkfile = find_filename('/my/path/blabla_test.xml', extension_requested,
files=files, dir=dir)
assert spkfile == dir + 'blabla_test.spk.2'
def test_find_filename_or_new():
dir = '/my/path/'
files = [
'blabla.aclu.1',
'blabla_test.aclu.1',
'blabla_test2.aclu.1',
'blabla_test3.aclu.3',
'blabla.spk.1',
'blabla_test.spk.1',
'blabla_test.spk.1',
]
file = find_filename_or_new('/my/path/blabla.xml', 'acluinfo',
files=files, dir=dir)
assert file == '/my/path/blabla.acluinfo.1'
def test_find_indices():
dir = '/my/path/'
files = [
'blabla.aclu.2',
'blabla_test.aclu.2',
'blabla_test.spk.4',
'blabla_test2.aclu.2',
'blabla.aclu.9',
'blabla_test3.aclu.3',
'blabla.spk.2',
'blabla_test.spk.2',
]
indices = find_indices('/my/path/blabla_test.xml',
files=files, dir=dir)
assert indices == [2, 4]
def test_triplets():
filename = 'my/path/blabla.aclu.2'
triplet = filename_to_triplet(filename)
filename2 = triplet_to_filename(triplet)
assert filename == filename2
assert triplet_to_filename(triplet[:2] + ('34',)) == \
'my/path/blabla.aclu.34'
def test_clusters():
dir = TEST_FOLDER
clufile = os.path.join(dir, 'test.aclu.1')
clufile2 = os.path.join(dir, 'test.aclu.1.saved')
clusters = read_clusters(clufile)
assert clusters.dtype == np.int32
assert clusters.shape == (1000,)
# Save.
save_clusters(clufile2, clusters)
# Open again.
clusters2 = read_clusters(clufile2)
assert np.array_equal(clusters, clusters2)
# Check the headers.
clusters_with_header = load_text(clufile, np.int32, skiprows=0)
clusters2_with_header = load_text(clufile2, np.int32, skiprows=0)
assert np.array_equal(clusters_with_header, clusters2_with_header)
def test_reorder():
# Generate clusters and permutations.
clusters = np.random.randint(size=1000, low=10, high=100)
clusters_unique = np.unique(clusters)
permutation = clusters_unique[np.random.permutation(len(clusters_unique))]
# Reorder.
clusters_reordered = reorder(clusters, permutation)
# Check.
i = len(clusters_unique) // 2
c = clusters_unique[i]
i_new = np.nonzero(permutation == c)[0][0]
my_clusters = clusters == c
assert np.all(clusters_reordered[my_clusters] == i_new)
def te_SKIP_st_renumber_clusters():
# Create clusters.
clusters = np.random.randint(size=20, low=10, high=100)
clusters_unique = np.unique(clusters)
n = len(clusters_unique)
# Create cluster info.
cluster_info = np.zeros((n, 3), dtype=np.int32)
cluster_info[:, 0] = clusters_unique
cluster_info[:, 1] = np.mod(np.arange(n, dtype=np.int32), 35) + 1
# Set groups.
k = n // 3
cluster_info[:k, 2] = 1
cluster_info[k:2 * n // 3, 2] = 0
cluster_info[2 * k:, 2] = 2
cluster_info[n // 2, 2] = 1
cluster_info = pd.DataFrame({
'color': cluster_info[:, 1],
'group': cluster_info[:, 2]},
dtype=np.int32, index=cluster_info[:, 0])
# Renumber
clusters_renumbered, cluster_info_renumbered = renumber_clusters(clusters,
cluster_info)
# Test.
c0 = clusters_unique[k] # group 0
c1 = clusters_unique[0] # group 1
c2 = clusters_unique[2 * k] # group 2
cm = clusters_unique[n // 2] # group 1
c0next = clusters_unique[k + 1]
c1next = clusters_unique[0 + 1]
c2next = clusters_unique[2 * k + 1]
# New order:
# c0 ... cm-1, cm+1, ..., c2-1, c1, ..., c0-1, cm, c2, ...
assert np.array_equal(clusters == c0, clusters_renumbered == 0 + 2)
assert np.array_equal(clusters == c0next,
clusters_renumbered == 1 + 2)
assert np.array_equal(clusters == c1, clusters_renumbered == k - 1 + 2)
assert np.array_equal(clusters == c1next,
clusters_renumbered == k + 2)
assert np.array_equal(clusters == c2, clusters_renumbered == 2 * k + 2)
assert np.array_equal(clusters == c2next,
clusters_renumbered == 2 * k + 1 + 2)
assert np.array_equal(get_indices(cluster_info_renumbered),
np.arange(n) + 2)
# Increasing groups with the new numbering.
assert np.all(np.diff(get_array(cluster_info_renumbered)[:,1]) >= 0)
assert np.all(select(cluster_info_renumbered, 0 + 2) ==
select(cluster_info, c0))
assert np.all(select(cluster_info_renumbered, 1 + 2) ==
select(cluster_info, c0next))
assert np.all(select(cluster_info_renumbered, k - 1 + 2) ==
select(cluster_info, c1))
assert np.all(select(cluster_info_renumbered, k + 2) ==
select(cluster_info, c1next))
assert np.all(select(cluster_info_renumbered, 2 * k + 2) ==
select(cluster_info, c2))
assert np.all(select(cluster_info_renumbered, 2 * k + 1 + 2) ==
select(cluster_info, c2next))
def test_convert_to_clu():
clusters = np.random.randint(size=1000, low=10, high=100)
clusters0 = clusters == 10
clusters1 = clusters == 20
clusters[clusters0] = 2
clusters[clusters1] = 3
clusters_unique = np.unique(clusters)
n = len(clusters_unique)
cluster_groups = np.random.randint(size=n, low=0, high=4)
noise = np.in1d(clusters, clusters_unique[np.nonzero(cluster_groups == 0)[0]])
mua = np.in1d(clusters, clusters_unique[np.nonzero(cluster_groups == 1)[0]])
cluster_info = pd.DataFrame({'group': cluster_groups,
'color': np.zeros(n, dtype=np.int32)},
index=clusters_unique,
dtype=np.int32)
clusters_new = convert_to_clu(clusters, cluster_info['group'])
assert np.array_equal(clusters_new == 0, noise)
assert np.array_equal(clusters_new == 1, mua)
def test_cluster_info():
dir = TEST_FOLDER
clufile = os.path.join(dir, 'test.aclu.1')
cluinfofile = os.path.join(dir, 'test.acluinfo.1')
clusters = read_clusters(clufile)
indices = np.unique(clusters)
colors = np.random.randint(low=0, high=10, size=len(indices))
groups = np.random.randint(low=0, high=2, size=len(indices))
cluster_info = pd.DataFrame({'color': pd.Series(colors, index=indices),
'group': pd.Series(groups, index=indices)})
save_cluster_info(cluinfofile, cluster_info)
cluster_info2 = read_cluster_info(cluinfofile)
assert np.array_equal(cluster_info.values, cluster_info2.values)
def test_group_info():
dir = TEST_FOLDER
groupinfofile = os.path.join(dir, 'test.groups.1')
group_info = np.zeros((4, 2), dtype=object)
group_info[:,0] = (np.arange(4) + 1)
group_info[:,1] = np.array(['Noise', 'MUA', 'Good', 'Unsorted'],
dtype=object)
group_info = pd.DataFrame(group_info)
save_group_info(groupinfofile, group_info)
group_info2 = read_group_info(groupinfofile)
assert np.array_equal(group_info.values, group_info2.values)
def test_klusters_loader_1():
# Open the mock data.
dir = TEST_FOLDER
xmlfile = os.path.join(dir, 'test.xml')
l = KlustersLoader(filename=xmlfile)
# Get full data sets.
features = l.get_features()
# features_some = l.get_some_features()
masks = l.get_masks()
waveforms = l.get_waveforms()
clusters = l.get_clusters()
spiketimes = l.get_spiketimes()
nclusters = len(Counter(clusters))
probe = l.get_probe()
cluster_colors = l.get_cluster_colors()
cluster_groups = l.get_cluster_groups()
group_colors = l.get_group_colors()
group_names = l.get_group_names()
cluster_sizes = l.get_cluster_sizes()
# Check the shape of the data sets.
# ---------------------------------
assert check_shape(features, (nspikes, nchannels * fetdim + 1))
# assert features_some.shape[1] == nchannels * fetdim + 1
assert check_shape(masks, (nspikes, nchannels))
assert check_shape(waveforms, (nspikes, nsamples, nchannels))
assert check_shape(clusters, (nspikes,))
assert check_shape(spiketimes, (nspikes,))
assert check_shape(probe, (nchannels, 2))
assert check_shape(cluster_colors, (nclusters,))
assert check_shape(cluster_groups, (nclusters,))
assert check_shape(group_colors, (4,))
assert check_shape(group_names, (4,))
assert check_shape(cluster_sizes, (nclusters,))
# Check the data type of the data sets.
# -------------------------------------
assert check_dtype(features, np.float32)
assert check_dtype(masks, np.float32)
# HACK: Panel has no dtype(s) attribute
# assert check_dtype(waveforms, np.float32)
assert check_dtype(clusters, np.int32)
assert check_dtype(spiketimes, np.float32)
assert check_dtype(probe, np.float32)
assert check_dtype(cluster_colors, np.int32)
assert check_dtype(cluster_groups, np.int32)
assert check_dtype(group_colors, np.int32)
assert check_dtype(group_names, object)
assert check_dtype(cluster_sizes, np.int32)
l.close()
def test_klusters_loader_2():
# Open the mock data.
dir = TEST_FOLDER
xmlfile = os.path.join(dir, 'test.xml')
l = KlustersLoader(filename=xmlfile)
# Get full data sets.
features = l.get_features()
masks = l.get_masks()
waveforms = l.get_waveforms()
clusters = l.get_clusters()
spiketimes = l.get_spiketimes()
nclusters = len(Counter(clusters))
probe = l.get_probe()
cluster_colors = l.get_cluster_colors()
cluster_groups = l.get_cluster_groups()
group_colors = l.get_group_colors()
group_names = l.get_group_names()
cluster_sizes = l.get_cluster_sizes()
# Check selection.
# ----------------
index = nspikes / 2
waveform = select(waveforms, index)
cluster = clusters[index]
spikes_in_cluster = np.nonzero(clusters == cluster)[0]
nspikes_in_cluster = len(spikes_in_cluster)
l.select(clusters=[cluster])
# Check the size of the selected data.
# ------------------------------------
assert check_shape(l.get_features(), (nspikes_in_cluster,
nchannels * fetdim + 1))
assert check_shape(l.get_masks(full=True), (nspikes_in_cluster,
nchannels * fetdim + 1))
assert check_shape(l.get_waveforms(),
(nspikes_in_cluster, nsamples, nchannels))
assert check_shape(l.get_clusters(), (nspikes_in_cluster,))
assert check_shape(l.get_spiketimes(), (nspikes_in_cluster,))
# Check waveform sub selection.
# -----------------------------
waveforms_selected = l.get_waveforms()
assert np.array_equal(get_array(select(waveforms_selected, index)),
get_array(waveform))
l.close()
def test_klusters_loader_control():
# Open the mock data.
dir = TEST_FOLDER
xmlfile = os.path.join(dir, 'test.xml')
l = KlustersLoader(filename=xmlfile)
# Take all spikes in cluster 3.
spikes = get_indices(l.get_clusters(clusters=3))
# Put them in cluster 4.
l.set_cluster(spikes, 4)
spikes_new = get_indices(l.get_clusters(clusters=4))
# Ensure all spikes in old cluster 3 are now in cluster 4.
assert np.all(np.in1d(spikes, spikes_new))
# Change cluster groups.
clusters = [2, 3, 4]
group = 0
l.set_cluster_groups(clusters, group)
groups = l.get_cluster_groups(clusters)
assert np.all(groups == group)
# Change cluster colors.
clusters = [2, 3, 4]
color = 12
l.set_cluster_colors(clusters, color)
colors = l.get_cluster_colors(clusters)
assert np.all(colors == color)
# Change group name.
group = 0
name = l.get_group_names(group)
name_new = 'Noise new'
assert name == 'Noise'
l.set_group_names(group, name_new)
assert l.get_group_names(group) == name_new
# Change group color.
groups = [1, 2]
colors = l.get_group_colors(groups)
color_new = 10
l.set_group_colors(groups, color_new)
assert np.all(l.get_group_colors(groups) == color_new)
# Add cluster and group.
spikes = get_indices(l.get_clusters(clusters=3))[:10]
# Create new group 100.
l.add_group(100, 'New group', 10)
# Create new cluster 10000 and put it in group 100.
l.add_cluster(10000, 100, 10)
# Put some spikes in the new cluster.
l.set_cluster(spikes, 10000)
clusters = l.get_clusters(spikes=spikes)
assert np.all(clusters == 10000)
groups = l.get_cluster_groups(10000)
assert groups == 100
l.set_cluster(spikes, 2)
# Remove the new cluster and group.
l.remove_cluster(10000)
l.remove_group(100)
assert np.all(~np.in1d(10000, l.get_clusters()))
assert np.all(~np.in1d(100, l.get_cluster_groups()))
l.close()
@with_setup(setup)
def test_klusters_save():
"""WARNING: this test should occur at the end of the module since it
changes the mock data sets."""
# Open the mock data.
dir = TEST_FOLDER
xmlfile = os.path.join(dir, 'test.xml')
l = KlustersLoader(filename=xmlfile)
clusters = l.get_clusters()
cluster_colors = l.get_cluster_colors()
cluster_groups = l.get_cluster_groups()
group_colors = l.get_group_colors()
group_names = l.get_group_names()
# Set clusters.
indices = get_indices(clusters)
l.set_cluster(indices[::2], 2)
l.set_cluster(indices[1::2], 3)
# Set cluster info.
cluster_indices = l.get_clusters_unique()
l.set_cluster_colors(cluster_indices[::2], 10)
l.set_cluster_colors(cluster_indices[1::2], 20)
l.set_cluster_groups(cluster_indices[::2], 1)
l.set_cluster_groups(cluster_indices[1::2], 0)
# Save.
l.remove_empty_clusters()
l.save()
clusters = read_clusters(l.filename_aclu)
cluster_info = read_cluster_info(l.filename_acluinfo)
assert np.all(clusters[::2] == 2)
assert np.all(clusters[1::2] == 3)
assert np.array_equal(cluster_info.index, cluster_indices)
assert np.all(cluster_info.values[::2, 0] == 10)
assert np.all(cluster_info.values[1::2, 0] == 20)
assert np.all(cluster_info.values[::2, 1] == 1)
assert np.all(cluster_info.values[1::2, 1] == 0)
l.close()
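# Running this suite (illustrative note, not part of the original file; the
# invocation assumes the klustaviewa source layout used in this repository):
#   nosetests kwiklib/dataio/tests/test_klustersloader.py
# test_klusters_save is decorated with @with_setup and rewrites the mock data,
# which is why it is kept at the end of the module (see its docstring).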
| gpl-3.0 |
dvklopfenstein/biocode | src/Tests/matplotlib/color_example.py | 1 | 1459 | #!/usr/bin/env python
"""Example using MplColorHelper."""
# Copyright (C) 2014-2015 DV Klopfenstein. All rights reserved.
__author__ = 'DV Klopfenstein'
__copyright__ = "Copyright (C) 2014-2017 DV Klopfenstein. All rights reserved."
__license__ = "GPL"
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pydvkbiology.matplotlib.ColorObj import MplColorHelper
import numpy as np
def main():
""" Plots all the named colors in matplotlib.
From: http://stackoverflow.com/questions/22408237/named-colors-in-matplotlib
"""
import math
import matplotlib.patches as patches
import matplotlib.colors as colors
fig = plt.figure()
axis = fig.add_subplot(111)
ratio = 1.0 / 3.0
count = math.ceil(math.sqrt(len(colors.cnames)))
x_count = count * ratio
y_count = count / ratio
xval = 0
yval = 0
width = 1 / x_count
height = 1 / y_count
for color_name in colors.cnames:
pos = (xval / x_count, yval / y_count)
axis.add_patch(patches.Rectangle(pos, width, height, color=color_name))
axis.annotate(color_name, xy=pos)
if yval >= y_count-1:
xval += 1
yval = 0
else:
yval += 1
plt.show()
plt.savefig('color_example.png')
print(' WROTE: {IMG}'.format(IMG='color_example.png'))
if __name__ == '__main__':
main()
# Copyright (C) 2014-2017 DV Klopfenstein. All rights reserved.
| mit |
hsuantien/scikit-learn | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, 0.3 * N),
np.random.normal(5, 1, 0.7 * N)))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, 0.3 * N),
np.random.normal(5, 1, 0.7 * N)))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |