repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---|
pprett/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that they form a 10 x 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
      info, time_))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
swirlingsand/self-driving-car-nanodegree-nd013 | play/undistort_image_play.py | 1 | 1198 | import pickle
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Read in the saved objpoints and imgpoints
dist_pickle = pickle.load(open("wide_dist_pickle.p", "rb"))
objpoints = dist_pickle["objpoints"]
imgpoints = dist_pickle["imgpoints"]
# Read in an image
img = cv2.imread('test_image.png')
# TODO: Write a function that takes an image, object points, and image points
# performs the camera calibration, image distortion correction and
# returns the undistorted image
def cal_undistort(img, objpoints, imgpoints):
# Use cv2.calibrateCamera and cv2.undistort()
# Calibrate camera
shape = img.shape[0:2]
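    # Note: cv2.calibrateCamera expects imageSize as (width, height), while
    # img.shape[0:2] is (height, width); img.shape[1::-1] may be what is
    # intended here.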
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
objpoints, imgpoints, shape, None, None)
# Undistort image
destination = cv2.undistort(img, mtx, dist, None, mtx)
return destination
undistorted = cal_undistort(img, objpoints, imgpoints)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
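# Note: cv2.imread returns images in BGR channel order while matplotlib's
# imshow expects RGB, so the colors shown here may look swapped; converting
# with cv2.cvtColor(img, cv2.COLOR_BGR2RGB) before plotting would avoid that.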
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=50)
ax2.imshow(undistorted)
ax2.set_title('Undistorted Image', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
| mit |
466152112/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 230 | 4762 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``); the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
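# Note: cv_estimate averages the staged heldout deviance over the k folds, so
# the cv_score computed below estimates test deviance as a function of the
# number of boosting iterations.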
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
zihua/scikit-learn | sklearn/metrics/pairwise.py | 8 | 46489 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False, dtype=None):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : string, type, list of types or None (default=None)
Data type required for X and Y. If None, the dtype will be an
appropriate float type selected by _return_float_dtype.
.. versionadded:: 0.18
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype_float = _return_float_dtype(X, Y)
warn_on_dtype = dtype is not None
estimator = 'check_pairwise_arrays'
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
Y = check_array(Y, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
    paired_distances : distances between pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
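# A minimal illustrative sketch (not part of the public API) of the
# ``Y_norm_squared`` shortcut described above: pre-computing the squared row
# norms of a fixed Y avoids recomputing them on every call. The toy values
# below are made up for demonstration.
def _euclidean_distances_precomputed_norms_sketch():
    X = np.asarray([[0., 1.], [1., 1.]])
    Y = np.asarray([[0., 0.], [2., 2.]])
    YY = (Y ** 2).sum(axis=1)             # computed once, reusable across calls
    D_plain = euclidean_distances(X, Y)
    D_cached = euclidean_distances(X, Y, Y_norm_squared=YY)
    return np.allclose(D_plain, D_cached)   # True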
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
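# A small illustrative sketch (toy data, not part of the module) of the
# behaviour documented above: for each row of X, the index of its nearest row
# in Y is returned together with the corresponding distance.
def _pairwise_distances_argmin_min_sketch():
    X = np.asarray([[0., 0.], [10., 10.]])
    Y = np.asarray([[1., 0.], [9., 9.]])
    indices, dists = pairwise_distances_argmin_min(X, Y)
    # indices == array([0, 1]); dists[0] == 1.0, dists[1] == sqrt(2)
    return indices, dists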
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
    -----
    The cosine distance is equivalent to half the squared euclidean
    distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
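# An illustrative check (made-up toy rows) of the note above: the paired
# cosine distance equals one minus the cosine of the angle between each pair,
# which for unit-normalized rows is half the squared euclidean distance.
def _paired_cosine_distances_identity_sketch():
    X = np.asarray([[1., 0.], [1., 2.]])
    Y = np.asarray([[1., 1.], [3., 1.]])
    cos = (X * Y).sum(axis=1) / (np.sqrt((X ** 2).sum(axis=1)) *
                                 np.sqrt((Y ** 2).sum(axis=1)))
    return np.allclose(paired_cosine_distances(X, Y), 1. - cos)   # True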
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
degree : int, default 3
gamma : float, default None
        if None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
gamma : float, default None
        If None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
        If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
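# A tiny worked sketch (illustrative only) of the formula above,
# K(x, y) = exp(-gamma ||x - y||^2), on two made-up points whose squared
# distance is 25.
def _rbf_kernel_sketch():
    X = np.asarray([[0., 0.], [3., 4.]])
    K = rbf_kernel(X, gamma=0.1)
    # off-diagonal entries equal exp(-0.1 * 25); the diagonal is exp(0) == 1
    return np.allclose(K[0, 1], np.exp(-2.5)) and np.allclose(np.diag(K), 1.)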
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
        If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter ``dense_output`` for dense output.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
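# An illustrative sketch (toy data) of the statement above: on L2-normalized
# rows, cosine_similarity coincides with linear_kernel.
def _cosine_similarity_vs_linear_kernel_sketch():
    X = normalize(np.asarray([[1., 2.], [3., 4.], [0., 1.]]))
    return np.allclose(cosine_similarity(X), linear_kernel(X))   # True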
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
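# An illustrative relation check (made-up histogram rows): the exponentiated
# kernel above is simply exp(gamma * additive_chi2_kernel(X, Y)).
def _chi2_kernel_relation_sketch():
    X = np.asarray([[.2, .8], [.5, .5]])
    return np.allclose(chi2_kernel(X, gamma=1.5),
                       np.exp(1.5 * additive_chi2_kernel(X)))   # True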
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
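# Note on the slicing above (illustrative numbers): gen_even_slices(Y.shape[0],
# n_jobs) yields n_jobs column blocks of near-equal width (e.g. 10 columns over
# 3 jobs gives widths 4, 3 and 3), and np.hstack glues the per-block results
# back together into the full pairwise matrix.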
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
X, Y = check_pairwise_arrays(X, Y, dtype=dtype)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
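# A brief usage sketch (illustrative only, toy data) of the three ways a
# metric can be supplied: a scikit-learn metric name, a scipy metric name, or
# a user-defined callable.
def _pairwise_distances_usage_sketch():
    X = np.asarray([[0., 1.], [1., 1.], [2., 0.]])
    D_sklearn = pairwise_distances(X, metric='manhattan')
    D_scipy = pairwise_distances(X, metric='chebyshev')
    D_callable = pairwise_distances(X, metric=lambda a, b: np.abs(a - b).sum())
    return D_sklearn.shape == D_scipy.shape == D_callable.shape == (3, 3)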
# These distances require boolean arrays when using scipy.spatial.distance
PAIRWISE_BOOLEAN_FUNCTIONS = [
'dice',
'jaccard',
'kulsinski',
'matching',
'rogerstanimoto',
'russellrao',
'sokalmichener',
'sokalsneath',
'yule',
]
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
    filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
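# A short usage sketch (illustrative, toy data): computing an RBF kernel
# matrix through this generic entry point, with ``filter_params`` dropping
# keyword arguments that the chosen kernel does not accept.
def _pairwise_kernels_usage_sketch():
    X = np.asarray([[0., 1.], [1., 0.]])
    K = pairwise_kernels(X, metric='rbf', filter_params=True,
                         gamma=0.5, degree=3)   # 'degree' is filtered out
    return K.shape == (2, 2)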
| bsd-3-clause |
Ex-Mente/auxi.0 | auxi/core/reporting.py | 1 | 3295 | #!/usr/bin/env python3
"""
This module provides classes to create reports.
"""
from io import StringIO
from enum import Enum
import csv
from tabulate import tabulate
from auxi.core.objects import Object
__version__ = '0.3.6'
__license__ = 'LGPL v3'
__copyright__ = 'Copyright 2016, Ex Mente Technologies (Pty) Ltd'
__author__ = 'Johan Zietsman'
__credits__ = ['Johan Zietsman']
__maintainer__ = 'Johan Zietsman'
__email__ = '[email protected]'
__status__ = 'Planning'
class ReportFormat(Enum):
"""
Represents the format the report should be outputted as.
"""
    printout = 1
    latex = 2
    txt = 3
    csv = 4
    string = 5
    matplotlib = 6
    png = 7
class Report(Object):
"""
Base class for all auxi reports.
"""
def __init__(self, data_source, output_path=None):
self.data_source = data_source
self.output_path = output_path
def _generate_table_(self):
return []
def _render_matplotlib_(self, png=False):
pass
def render(self, format=ReportFormat.printout):
"""
Render the report in the specified format
:param format: The format. The default format is to print
the report to the console.
:returns: If the format was set to 'string' then a string
representation of the report is returned.
"""
table = self._generate_table_()
if format == ReportFormat.printout:
print(tabulate(table, headers="firstrow", tablefmt="simple"))
elif format == ReportFormat.latex:
self._render_latex_(table)
elif format == ReportFormat.txt:
self._render_txt_(table)
elif format == ReportFormat.csv:
self._render_csv_(table)
elif format == ReportFormat.string:
return str(tabulate(table, headers="firstrow", tablefmt="simple"))
elif format == ReportFormat.matplotlib:
self._render_matplotlib_()
elif format == ReportFormat.png:
if self.output_path is None:
self._render_matplotlib_()
else:
self._render_matplotlib_(True)
def _render_latex_(self, table):
if self.output_path is not None:
with open(self.output_path + '.tex', 'w') as f:
f.write(
tabulate(table, headers="firstrow", tablefmt="latex"))
else:
print(tabulate(table, headers="firstrow", tablefmt="latex"))
def _render_txt_(self, table):
if self.output_path is not None:
with open(self.output_path + '.txt', 'w') as f:
f.write(
tabulate(table, headers="firstrow", tablefmt="simple"))
else:
print(tabulate(table, headers="firstrow", tablefmt="simple"))
def _render_csv_(self, table):
if self.output_path is not None:
with open(self.output_path + '.csv', 'w') as f:
csv.writer(f, lineterminator='\n').writerows(table)
else:
with StringIO() as f:
csv.writer(f, lineterminator='\n').writerows(table)
print(f.getvalue())
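# A minimal sketch (not part of auxi itself) of how a concrete report can
# build on this base class: a subclass only needs to supply the table rows via
# _generate_table_(). The (name, value) pairs in the usage note are made up.
class _ExampleTableReport(Report):
    """Illustrative report that renders its data_source as a two-column table."""

    def _generate_table_(self):
        table = [["name", "value"]]
        for name, value in self.data_source:
            table.append([name, value])
        return table

# e.g. _ExampleTableReport([("mass", 1.2), ("temperature", 298.15)]).render(
#     ReportFormat.string) returns the tabulated report as a plain string.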
if __name__ == "__main__":
import unittest
from reporting_test import ReportingUnitTester
unittest.main()
| lgpl-3.0 |
dyoung418/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimators_test.py | 21 | 6697 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"transformed_x": constant_op.constant([9.])
}, {
"transformed_y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["transformed_x"]
loss = constant_op.constant([2.])
update_global_step = variables.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn, steps=1,
metrics={"label":
metric_spec.MetricSpec(lambda predictions, labels: labels)})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testFeatureEngineeringFnWithSameName(self):
def input_fn():
return {
"x": constant_op.constant(["9."])
}, {
"y": constant_op.constant(["99."])
}
def feature_engineering_fn(features, labels):
# Github #12205: raise a TypeError if called twice.
_ = string_ops.string_split(features["x"])
features["x"] = constant_op.constant([9.])
labels["y"] = constant_op.constant([99.])
return features, labels
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = variables.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn, steps=1,
metrics={"label":
metric_spec.MetricSpec(lambda predictions, labels: labels)})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testNoneFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"x": constant_op.constant([9.])
}, {
"y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = variables.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator_with_fe_fn = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
estimator_without_fe_fn = estimator_lib.Estimator(model_fn=model_fn)
estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)
# predictions = x
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict(
input_fn=input_fn, as_iterable=True))
self.assertEqual(9., prediction_with_fe_fn)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict(
input_fn=input_fn, as_iterable=True))
self.assertEqual(1., prediction_without_fe_fn)
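# A minimal sketch, for illustration only: the tests above exercise the order
# of operations inside Estimator, input_fn() -> feature_engineering_fn ->
# model_fn, where model_fn only ever sees the transformed features and labels.
# The stand-in below mirrors that flow with plain dicts and no TensorFlow; all
# names are hypothetical and the helper is never called.
def _feature_engineering_flow_sketch():
  def fake_input_fn():
    return {"x": [1.]}, {"y": [11.]}
  def fake_feature_engineering_fn(features, labels):
    del features, labels  # replaced wholesale, as in the tests above
    return {"transformed_x": [9.]}, {"transformed_y": [99.]}
  def fake_model_fn(features, labels):
    return features["transformed_x"], labels["transformed_y"]
  features, labels = fake_input_fn()
  features, labels = fake_feature_engineering_fn(features, labels)
  return fake_model_fn(features, labels)  # ([9.0], [99.0])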
class CustomOptimizer(test.TestCase):
"""Custom optimizer tests."""
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
def custom_optimizer():
return momentum_lib.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
classifier = learn.DNNClassifier(
hidden_units=[10, 20, 10],
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
n_classes=3,
optimizer=custom_optimizer,
config=learn.RunConfig(tf_random_seed=1))
classifier.fit(x_train, y_train, steps=400)
predictions = np.array(list(classifier.predict_classes(x_test)))
score = accuracy_score(y_test, predictions)
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
rdeits/director | src/python/scripts/logReporter.py | 6 | 4903 | import os
import sys
import time
import lcm
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
from ddapp import lcmspy as spy
import scipy.signal as sig
def sizeof_fmt(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
class LCMLogAnalyzer(object):
def __init__(self, logFile):
self.logFile = logFile
self.jointVelocityTimes = list()
self.jointVelocityNorms = list()
self.jointVelocities = list()
self.batteryTimes = list()
self.batteryPercentage = list()
self.pressureTimes = list()
self.pressureReadings = list()
self.slidingWindowWidth = 100
self.movementThreshold = 0.4
def parseLog(self):
log = lcm.EventLog(self.logFile, 'r')
print 'Log size: ' + sizeof_fmt(log.size())
log.seek(0)
while True:
event = log.read_next_event()
if not event:
break
timestamp = event.timestamp
if event.channel == 'EST_ROBOT_STATE':
msg = spy.decodeMessage(event.data)
self.jointVelocityTimes.append(timestamp)
self.jointVelocityNorms.append(np.linalg.norm(msg.joint_velocity))
elif event.channel == 'ATLAS_BATTERY_DATA':
msg = spy.decodeMessage(event.data)
self.batteryTimes.append(timestamp)
self.batteryPercentage.append(msg.remaining_charge_percentage)
elif event.channel == 'ATLAS_STATUS':
msg = spy.decodeMessage(event.data)
self.pressureTimes.append(timestamp)
self.pressureReadings.append(msg.pump_supply_pressure)
print 'parsed ' + str(len(self.jointVelocityNorms)) + ' robot states'
print 'parsed ' + str(len(self.batteryPercentage)) + ' battery states'
print 'parsed ' + str(len(self.pressureReadings)) + ' pump readings'
def movingAverage(self, x):
N = self.slidingWindowWidth
return np.convolve(x, np.ones((N,))/N)[(N-1):]
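# A minimal sketch, for illustration only (hypothetical helper, never called):
# with np.convolve in 'full' mode and the [(N-1):] slice, the smoothed signal
# keeps the input length, and the last N-1 samples taper toward zero because
# the implicit zero padding enters the window there.
def _moving_average_demo():
    # With N = 2 the result is [1.5, 2.5, 3.5, 2.0].
    return np.convolve([1., 2., 3., 4.], np.ones((2,)) / 2)[(2 - 1):]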
def plotPump(self, tmin):
plt.title('Pump')
plt.ylabel('System Pressure (PSI)')
scaledPressureTimes = (np.asarray(self.pressureTimes) - tmin) / 1e6
plt.plot(scaledPressureTimes, self.pressureReadings)
v = plt.axis()
plt.axis([0, scaledPressureTimes[-1], v[2], v[3]])
plt.grid(True)
def plotMovement(self, smoothed, movement, tmin):
plt.title('Movement')
scaledJointTimes = (np.asarray(self.jointVelocityTimes) - tmin) / 1e6
plt.plot(scaledJointTimes, smoothed)
plt.plot(scaledJointTimes, movement, 'r--')
plt.axis([0, scaledJointTimes[-1], 0, 1.2])
plt.ylabel('smoothed norm of xdot')
plt.grid(True)
def plotBattery(self, tmin):
plt.title('Battery')
plt.ylabel('Charge Remaining (%)')
scaledBatteryTimes = (np.asarray(self.batteryTimes) - tmin) / 1e6
plt.plot(scaledBatteryTimes, self.batteryPercentage)
v = plt.axis()
plt.axis([0, scaledBatteryTimes[-1], v[2], v[3]])
plt.grid(True)
def plotResults(self):
tmin = self.jointVelocityTimes[0]
tmax = self.jointVelocityTimes[-1]
totalMicroseconds = float(tmax-tmin)
average_dt = totalMicroseconds/len(self.jointVelocityNorms)
smoothed = self.movingAverage(self.jointVelocityNorms)
movement = smoothed > self.movementThreshold
movementSeconds = float(np.count_nonzero(movement) * average_dt / 1e6)
totalSeconds = float((tmax-tmin)/1e6)
print("%.2f / %.2f seconds of movement ( %.2f %% continuous motion) " % (movementSeconds, totalSeconds , movementSeconds / totalSeconds * 1e2))
minChargePercent = np.ndarray.min(np.asarray(self.batteryPercentage))
print("Battery fell from %.2f %% to %.2f %% (Used %.2f %%)" % (self.batteryPercentage[0], minChargePercent , self.batteryPercentage[0] - minChargePercent))
print 'plotting results'
plt.figure(1)
plt.suptitle('LCM Log Battery/Movement Analysis')
plt.subplot(311)
self.plotMovement(smoothed, movement, tmin)
plt.subplot(312)
self.plotPump(tmin)
plt.subplot(313)
self.plotBattery(tmin)
plt.xlabel('Time (s)')
plt.show()
def main(argv):
try:
logFile = sys.argv[1]
except IndexError:
print 'Usage: %s <log file>' % sys.argv[0]
sys.exit(1)
spy.findLCMModulesInSysPath()
parser = LCMLogAnalyzer(logFile)
parser.parseLog()
parser.plotResults()
if __name__ == '__main__':
main(sys.argv)
| bsd-3-clause |
rileyrustad/pdxapartmentfinder | pipeline/processing.py | 1 | 4319 | import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import json
from collections import Counter
import operator
import os.path
from collections import defaultdict
from datetime import date, datetime
#========================================================
def days_between(d1, d2):
d1 = datetime.strptime(d1, "%Y-%m-%d")
d2 = datetime.strptime(d2, "%Y-%m-%d")
return abs((d2 - d1).days)
def averager(my_dict, key):
temp = []
for entry in my_dict:
if my_dict[entry][key] < 10000:
temp.append(float(my_dict[entry][key]))
return np.nanmean(temp)
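# A minimal sketch with made-up listings (hypothetical helper, never called):
# values of 10000 or more are skipped as outliers, so the result below is the
# nanmean of 1200 and 950, i.e. 1075.0.
def _averager_demo():
    listings = {'a': {'price': 1200}, 'b': {'price': 950}, 'c': {'price': 999999}}
    return averager(listings, 'price')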
def moder(my_dict, key):
temp = []
for entry in my_dict:
temp.append(my_dict[entry][key])
temp = Counter(temp)
mode = max(temp.iteritems(), key=operator.itemgetter(1))[0]
try:
if np.isnan(mode):
del temp[mode]
mode = max(temp.iteritems(), key=operator.itemgetter(1))[0]
return mode
except TypeError:
return mode
def imputer(listing, imputables):
for entry in listing:
if entry == 'smoking':
if listing[entry] != 'no smoking':
listing[entry] = 'smoking'
if entry == 'wheelchair':
if listing[entry] != 'wheelchair access':
listing[entry] = 'no wheelchair access'
if entry == 'bath':
if listing[entry] == 'shared' or listing[entry] == 'split':
listing[entry] = .5
try:
if np.isnan(listing[entry]):
listing[entry]=imputables[entry]
except TypeError:
continue
except KeyError:
continue
return listing
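# A minimal sketch with made-up values (hypothetical helper, never called):
# 'smoking' collapses to a two-level flag, a 'shared' bath becomes 0.5, and
# NaN numeric fields fall back to the precomputed imputables, giving
# {'bed': 2.0, 'bath': 0.5, 'smoking': 'smoking'}.
def _imputer_demo():
    listing = {'bed': float('nan'), 'bath': 'shared', 'smoking': float('nan')}
    imputables = {'bed': 2.0, 'bath': 1.0, 'smoking': 'no smoking'}
    return imputer(listing, imputables)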
def imputables_getter(my_dict):
imputables = {}
continuous_features = ['content', 'price', 'feet', 'getphotos','long',
'lat']
discrete_features = ['laundry', 'bed', 'bath', 'housingtype', 'parking']
imputables = {}
for variable in continuous_features:
imputables[variable] = averager(my_dict, variable)
for variable in discrete_features:
imputables[variable] = moder(my_dict, variable)
return imputables
if __name__ == '__main__':
filepath = 'data/MasterApartmentData.json'
if os.path.isfile(filepath) == True:
f = open(filepath)
my_dict = json.load(f)
f.close()
# If the file doesn't exist, create that file.
else:
my_dict = {}
# Create 90 Day data file.
Day90Data = open('data/Day90ApartmentData.json',"w")
dict90 = defaultdict(dict)
today = date.today()
# check entries in master data, and include those from the last 90 days
for entry in my_dict:
if days_between(str(today), my_dict[entry]['date']) <= 90:
dict90[entry] = my_dict[entry]
print str(len(dict90))+ " listings posted in the last 90 days."
json.dump(dict90, Day90Data)
Day90Data.close()
my_dict = dict90
# #open the data from the last 90 days
# with open('data/Day90ApartmentData.json', r) as f:
# my_dict = json.load(f)
"""All of the variables in the dictionary are:
u'available', u'content', u'laundry', u'furnished', u'price',
u'time', u'dog', u'bed', u'bath', u'feet', u'date', u'long', u'parking',
u'lat', u'smoking', u'getphotos', u'cat', u'hasmap', u'wheelchair',
u'housingtype', u'lastseen']"""
"""All variables used: u'content', u'laundry', u'price', u'dog', u'bed',
u'bath', u'feet', u'long', u'parking', u'lat', u'smoking', u'getphotos',
u'cat', u'hasmap', u'wheelchair', u'housingtype'
"""
#go back and adjust the gis variables
continuous_features = ['content', 'price', 'feet', 'getphotos','long',
'lat']
discrete_features = ['laundry', 'bed', 'bath', 'housingtype', 'parking']
imputables = imputables_getter(my_dict)
for listing in my_dict:
my_dict[listing] = imputer(my_dict[listing],imputables)
dframe = DataFrame(my_dict).T
dframe = dframe[['content', 'laundry', 'price', 'dog', 'bed',
'bath', 'feet', 'long', 'parking', 'lat', 'smoking', 'getphotos',
'cat', 'hasmap', 'wheelchair', 'housingtype']]
dframe = pd.get_dummies(dframe, columns = ['laundry', 'parking', 'smoking',
'wheelchair', 'housingtype'])
# from sklearn.cross_validation import train_test_split
# X_train, X_test, y_train, y_test = train_test_split(
# dframe.drop('price', axis = 1), dframe.price, test_size=0.33)
# from sklearn.ensemble import RandomForestRegressor
# reg = RandomForestRegressor()
# reg.fit(X_train, y_train)
# processed = open('data/ProcessedDay90ApartmentData.json',"w")
# json.dump(my_dict, processed)
# processed.close()
| mit |
kagayakidan/scikit-learn | setup.py | 76 | 9370 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
scipy_min_version = '0.9'
numpy_min_version = '1.6.1'
def get_scipy_status():
"""
Returns a dictionary containing a boolean specifying whether SciPy
is up-to-date, along with the version string (empty string if
not installed).
"""
scipy_status = {}
try:
import scipy
scipy_version = scipy.__version__
scipy_status['up_to_date'] = parse_version(
scipy_version) >= parse_version(scipy_min_version)
scipy_status['version'] = scipy_version
except ImportError:
scipy_status['up_to_date'] = False
scipy_status['version'] = ""
return scipy_status
def get_numpy_status():
"""
Returns a dictionary containing a boolean specifying whether NumPy
is up-to-date, along with the version string (empty string if
not installed).
"""
numpy_status = {}
try:
import numpy
numpy_version = numpy.__version__
numpy_status['up_to_date'] = parse_version(
numpy_version) >= parse_version(numpy_min_version)
numpy_status['version'] = numpy_version
except ImportError:
numpy_status['up_to_date'] = False
numpy_status['version'] = ""
return numpy_status
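# A minimal sketch, for illustration only (hypothetical helper, never called
# by setup): the status checks above compare versions with
# pkg_resources.parse_version, which orders version strings component-wise
# rather than lexicographically.
def _parse_version_sketch():
    newer = parse_version('1.10.0') >= parse_version(numpy_min_version)  # True
    older = parse_version('1.6') >= parse_version(numpy_min_version)     # False
    return newer, older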
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
numpy_status = get_numpy_status()
numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
numpy_min_version)
scipy_status = get_scipy_status()
scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
scipy_min_version)
instructions = ("Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if numpy_status['up_to_date'] is False:
if numpy_status['version']:
raise ImportError("Your installation of Numerical Python "
"(NumPy) {0} is out-of-date.\n{1}{2}"
.format(numpy_status['version'],
numpy_req_str, instructions))
else:
raise ImportError("Numerical Python (NumPy) is not "
"installed.\n{0}{1}"
.format(numpy_req_str, instructions))
if scipy_status['up_to_date'] is False:
if scipy_status['version']:
raise ImportError("Your installation of Scientific Python "
"(SciPy) {0} is out-of-date.\n{1}{2}"
.format(scipy_status['version'],
scipy_req_str, instructions))
else:
raise ImportError("Scientific Python (SciPy) is not "
"installed.\n{0}{1}"
.format(scipy_req_str, instructions))
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
kleskjr/scipy | scipy/special/basic.py | 14 | 71025 | #
# Author: Travis Oliphant, 2002
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
import math
from scipy._lib.six import xrange
from numpy import (pi, asarray, floor, isscalar, iscomplex, real, imag, sqrt,
where, mgrid, sin, place, issubdtype, extract,
less, inexact, nan, zeros, atleast_1d, sinc)
from ._ufuncs import (ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma,
psi, _zeta, hankel1, hankel2, yv, kv, _gammaln,
ndtri, errprint, poch, binom, hyp0f1)
from . import specfun
from . import orthogonal
from ._comb import _comb_int
__all__ = ['agm', 'ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros',
'ber_zeros', 'bernoulli', 'berp_zeros', 'bessel_diff_formula',
'bi_zeros', 'clpmn', 'comb', 'digamma', 'diric', 'ellipk',
'erf_zeros', 'erfcinv', 'erfinv', 'errprint', 'euler', 'factorial',
'factorialk', 'factorial2', 'fresnel_zeros',
'fresnelc_zeros', 'fresnels_zeros', 'gamma', 'gammaln', 'h1vp',
'h2vp', 'hankel1', 'hankel2', 'hyp0f1', 'iv', 'ivp', 'jn_zeros',
'jnjnp_zeros', 'jnp_zeros', 'jnyn_zeros', 'jv', 'jvp', 'kei_zeros',
'keip_zeros', 'kelvin_zeros', 'ker_zeros', 'kerp_zeros', 'kv',
'kvp', 'lmbda', 'lpmn', 'lpn', 'lqmn', 'lqn', 'mathieu_a',
'mathieu_b', 'mathieu_even_coef', 'mathieu_odd_coef', 'ndtri',
'obl_cv_seq', 'pbdn_seq', 'pbdv_seq', 'pbvv_seq', 'perm',
'polygamma', 'pro_cv_seq', 'psi', 'riccati_jn', 'riccati_yn',
'sinc', 'sph_in', 'sph_inkn',
'sph_jn', 'sph_jnyn', 'sph_kn', 'sph_yn', 'y0_zeros', 'y1_zeros',
'y1p_zeros', 'yn_zeros', 'ynp_zeros', 'yv', 'yvp', 'zeta',
'SpecialFunctionWarning']
class SpecialFunctionWarning(Warning):
"""Warning that can be issued with ``errprint(True)``"""
pass
warnings.simplefilter("always", category=SpecialFunctionWarning)
def diric(x, n):
"""Periodic sinc function, also called the Dirichlet function.
The Dirichlet function is defined as::
diric(x) = sin(x * n/2) / (n * sin(x / 2)),
where `n` is a positive integer.
Parameters
----------
x : array_like
Input data
n : int
Integer defining the periodicity.
Returns
-------
diric : ndarray
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-8*np.pi, 8*np.pi, num=201)
>>> plt.figure(figsize=(8, 8));
>>> for idx, n in enumerate([2, 3, 4, 9]):
... plt.subplot(2, 2, idx+1)
... plt.plot(x, special.diric(x, n))
... plt.title('diric, n={}'.format(n))
>>> plt.show()
The following example demonstrates that `diric` gives the magnitudes
(modulo the sign and scaling) of the Fourier coefficients of a
rectangular pulse.
Suppress output of values that are effectively 0:
>>> np.set_printoptions(suppress=True)
Create a signal `x` of length `m` with `k` ones:
>>> m = 8
>>> k = 3
>>> x = np.zeros(m)
>>> x[:k] = 1
Use the FFT to compute the Fourier transform of `x`, and
inspect the magnitudes of the coefficients:
>>> np.abs(np.fft.fft(x))
array([ 3. , 2.41421356, 1. , 0.41421356, 1. ,
0.41421356, 1. , 2.41421356])
Now find the same values (up to sign) using `diric`. We multiply
by `k` to account for the different scaling conventions of
`numpy.fft.fft` and `diric`:
>>> theta = np.linspace(0, 2*np.pi, m, endpoint=False)
>>> k * special.diric(theta, k)
array([ 3. , 2.41421356, 1. , -0.41421356, -1. ,
-0.41421356, 1. , 2.41421356])
"""
x, n = asarray(x), asarray(n)
n = asarray(n + (x-x))
x = asarray(x + (n-n))
if issubdtype(x.dtype, inexact):
ytype = x.dtype
else:
ytype = float
y = zeros(x.shape, ytype)
# empirical minval for 32, 64 or 128 bit float computations
# where sin(x/2) < minval, result is fixed at +1 or -1
if np.finfo(ytype).eps < 1e-18:
minval = 1e-11
elif np.finfo(ytype).eps < 1e-15:
minval = 1e-7
else:
minval = 1e-3
mask1 = (n <= 0) | (n != floor(n))
place(y, mask1, nan)
x = x / 2
denom = sin(x)
mask2 = (1-mask1) & (abs(denom) < minval)
xsub = extract(mask2, x)
nsub = extract(mask2, n)
zsub = xsub / pi
place(y, mask2, pow(-1, np.round(zsub)*(nsub-1)))
mask = (1-mask1) & (1-mask2)
xsub = extract(mask, x)
nsub = extract(mask, n)
dsub = extract(mask, denom)
place(y, mask, sin(nsub*xsub)/(nsub*dsub))
return y
def gammaln(x):
"""
Logarithm of the absolute value of the Gamma function for real inputs.
Parameters
----------
x : array_like

Values on the real line at which to compute ``gammaln``
Returns
-------
gammaln : ndarray
Values of ``gammaln`` at x.
See Also
--------
gammasgn : sign of the gamma function
loggamma : principal branch of the logarithm of the gamma function
Notes
-----
When used in conjunction with `gammasgn`, this function is useful
for working in logspace on the real axis without having to deal with
complex numbers, via the relation ``exp(gammaln(x)) = gammasgn(x)*gamma(x)``.
Note that `gammaln` currently accepts complex-valued inputs, but it is not
the same function as for real-valued inputs, and the branch is not
well-defined --- using `gammaln` with complex is deprecated and will be
disallowed in future Scipy versions.
For complex-valued log-gamma, use `loggamma` instead of `gammaln`.
"""
if np.iscomplexobj(x):
warnings.warn(("Use of gammaln for complex arguments is "
"deprecated as of scipy 0.18.0. Use "
"scipy.special.loggamma instead."),
DeprecationWarning)
return _gammaln(x)
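# A minimal sketch, for illustration only, of the identity quoted in the
# Notes above, ``exp(gammaln(x)) = gammasgn(x)*gamma(x)``, rearranged as
# gamma(x) == gammasgn(x) * exp(gammaln(x)) for real x away from the poles.
# Hypothetical helper, never called; gammasgn is imported locally so the
# module-level imports stay untouched.
def _gammaln_identity_sketch():
    from scipy.special import gammasgn
    x = np.array([0.5, 4.0, -2.5])
    return np.allclose(gamma(x), gammasgn(x) * np.exp(gammaln(x)))  # True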
def jnjnp_zeros(nt):
"""Compute zeros of integer-order Bessel functions Jn and Jn'.
Results are arranged in order of the magnitudes of the zeros.
Parameters
----------
nt : int
Number (<=1200) of zeros to compute
Returns
-------
zo[l-1] : ndarray
Value of the lth zero of Jn(x) or Jn'(x). Of length `nt`.
n[l-1] : ndarray
Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`.
m[l-1] : ndarray
Serial number of the zeros of Jn(x) or Jn'(x) associated
with lth zero. Of length `nt`.
t[l-1] : ndarray
0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of
length `nt`.
See Also
--------
jn_zeros, jnp_zeros : to get separated arrays of zeros.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200):
raise ValueError("Number must be integer <= 1200.")
nt = int(nt)
n, m, t, zo = specfun.jdzo(nt)
return zo[1:nt+1], n[:nt], m[:nt], t[:nt]
def jnyn_zeros(n, nt):
"""Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
Returns 4 arrays of length `nt`, corresponding to the first `nt` zeros of
Jn(x), Jn'(x), Yn(x), and Yn'(x), respectively.
Parameters
----------
n : int
Order of the Bessel functions
nt : int
Number (<=1200) of zeros to compute
See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(nt) and isscalar(n)):
raise ValueError("Arguments must be scalars.")
if (floor(n) != n) or (floor(nt) != nt):
raise ValueError("Arguments must be integers.")
if (nt <= 0):
raise ValueError("nt > 0")
return specfun.jyzo(abs(n), nt)
def jn_zeros(n, nt):
"""Compute zeros of integer-order Bessel function Jn(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[0]
def jnp_zeros(n, nt):
"""Compute zeros of integer-order Bessel function derivative Jn'(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[1]
def yn_zeros(n, nt):
"""Compute zeros of integer-order Bessel function Yn(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[2]
def ynp_zeros(n, nt):
"""Compute zeros of integer-order Bessel function derivative Yn'(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[3]
def y0_zeros(nt, complex=False):
"""Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
The derivatives are given by Y0'(z0) = -Y1(z0) at each zero z0.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z0n : ndarray
Location of nth zero of Y0(z)
y0pz0n : ndarray
Value of derivative Y0'(z0) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 0
kc = not complex
return specfun.cyzo(nt, kf, kc)
def y1_zeros(nt, complex=False):
"""Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
The derivatives are given by Y1'(z1) = Y0(z1) at each zero z1.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z1n : ndarray
Location of nth zero of Y1(z)
y1pz1n : ndarray
Value of derivative Y1'(z1) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 1
kc = not complex
return specfun.cyzo(nt, kf, kc)
def y1p_zeros(nt, complex=False):
"""Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
The values are given by Y1(z1) at each z1 where Y1'(z1)=0.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z1pn : ndarray
Location of nth zero of Y1'(z)
y1z1pn : ndarray
Value of Y1(z1pn) at the nth zero of Y1'(z)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 2
kc = not complex
return specfun.cyzo(nt, kf, kc)
def _bessel_diff_formula(v, z, n, L, phase):
# from AMS55.
# L(v, z) = J(v, z), Y(v, z), H1(v, z), H2(v, z), phase = -1
# L(v, z) = I(v, z) or exp(v*pi*i)K(v, z), phase = 1
# For K, you can pull out the exp((v-k)*pi*i) into the caller
v = asarray(v)
p = 1.0
s = L(v-n, z)
for i in xrange(1, n+1):
p = phase * (p * (n-i+1)) / i # = choose(k, i)
s += p*L(v-n + i*2, z)
return s / (2.**n)
bessel_diff_formula = np.deprecate(_bessel_diff_formula,
message="bessel_diff_formula is a private function, do not use it!")
def jvp(v, z, n=1):
"""Compute nth derivative of Bessel function Jv(z) with respect to `z`.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return jv(v, z)
else:
return _bessel_diff_formula(v, z, n, jv, -1)
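# A minimal sketch, for illustration only (hypothetical helper, never called):
# for n = 1 the difference formula above reduces to the familiar recurrence
# Jv'(z) = (J_{v-1}(z) - J_{v+1}(z)) / 2, so jvp can be cross-checked
# directly against jv.
def _jvp_recurrence_sketch():
    v, z = 2.0, 3.5
    return np.allclose(jvp(v, z, 1), 0.5 * (jv(v - 1, z) - jv(v + 1, z)))  # True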
def yvp(v, z, n=1):
"""Compute nth derivative of Bessel function Yv(z) with respect to `z`.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return yv(v, z)
else:
return _bessel_diff_formula(v, z, n, yv, -1)
def kvp(v, z, n=1):
"""Compute nth derivative of real-order modified Bessel function Kv(z)
Kv(z) is the modified Bessel function of the second kind.
Derivative is calculated with respect to `z`.
Parameters
----------
v : array_like of float
Order of Bessel function
z : array_like of complex
Argument at which to evaluate the derivative
n : int
Order of derivative. Default is first derivative.
Returns
-------
out : ndarray
The results
Examples
--------
Calculate multiple values at order 5:
>>> from scipy.special import kvp
>>> kvp(5, (1, 2, 3+5j))
array([-1849.0354+0.j , -25.7735+0.j , -0.0307+0.0875j])
Calculate for a single value at multiple orders:
>>> kvp((4, 4.5, 5), 1)
array([ -184.0309, -568.9585, -1849.0354])
Notes
-----
The derivative is computed using the relation DLMF 10.29.5 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 6.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.29.E5
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return kv(v, z)
else:
return (-1)**n * _bessel_diff_formula(v, z, n, kv, 1)
def ivp(v, z, n=1):
"""Compute nth derivative of modified Bessel function Iv(z) with respect
to `z`.
Parameters
----------
v : array_like of float
Order of Bessel function
z : array_like of complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
The derivative is computed using the relation DLMF 10.29.5 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 6.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.29.E5
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return iv(v, z)
else:
return _bessel_diff_formula(v, z, n, iv, 1)
def h1vp(v, z, n=1):
"""Compute nth derivative of Hankel function H1v(z) with respect to `z`.
Parameters
----------
v : float
Order of Hankel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel1(v, z)
else:
return _bessel_diff_formula(v, z, n, hankel1, -1)
def h2vp(v, z, n=1):
"""Compute nth derivative of Hankel function H2v(z) with respect to `z`.
Parameters
----------
v : float
Order of Hankel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel2(v, z)
else:
return _bessel_diff_formula(v, z, n, hankel2, -1)
@np.deprecate(message="scipy.special.sph_jn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_jn instead. "
"Note that the new function has a different signature.")
def sph_jn(n, z):
"""Compute spherical Bessel function jn(z) and derivative.
This function computes the value and first derivative of jn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of jn to compute
z : complex
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(z), ..., jn(z)
jnp : ndarray
First derivative j0'(z), ..., jn'(z)
See also
--------
spherical_jn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, jn, jnp = specfun.sphj(n1, z)
return jn[:(n+1)], jnp[:(n+1)]
@np.deprecate(message="scipy.special.sph_yn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_yn instead. "
"Note that the new function has a different signature.")
def sph_yn(n, z):
"""Compute spherical Bessel function yn(z) and derivative.
This function computes the value and first derivative of yn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of yn to compute
z : complex
Argument at which to evaluate
Returns
-------
yn : ndarray
Value of y0(z), ..., yn(z)
ynp : ndarray
First derivative y0'(z), ..., yn'(z)
See also
--------
spherical_yn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, yn, ynp = specfun.sphy(n1, z)
return yn[:(n+1)], ynp[:(n+1)]
@np.deprecate(message="scipy.special.sph_jnyn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_jn and "
"scipy.special.spherical_yn instead. "
"Note that the new function has a different signature.")
def sph_jnyn(n, z):
"""Compute spherical Bessel functions jn(z) and yn(z) and derivatives.
This function computes the value and first derivative of jn(z) and yn(z)
for all orders up to and including n.
Parameters
----------
n : int
Maximum order of jn and yn to compute
z : complex
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(z), ..., jn(z)
jnp : ndarray
First derivative j0'(z), ..., jn'(z)
yn : ndarray
Value of y0(z), ..., yn(z)
ynp : ndarray
First derivative y0'(z), ..., yn'(z)
See also
--------
spherical_jn
spherical_yn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, yn, ynp = specfun.sphy(n1, z)
nm, jn, jnp = specfun.sphj(n1, z)
return jn[:(n+1)], jnp[:(n+1)], yn[:(n+1)], ynp[:(n+1)]
@np.deprecate(message="scipy.special.sph_in is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_in instead. "
"Note that the new function has a different signature.")
def sph_in(n, z):
"""Compute spherical Bessel function in(z) and derivative.
This function computes the value and first derivative of in(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of in to compute
z : complex
Argument at which to evaluate
Returns
-------
in : ndarray
Value of i0(z), ..., in(z)
inp : ndarray
First derivative i0'(z), ..., in'(z)
See also
--------
spherical_in
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, In, Inp = specfun.sphi(n1, z)
return In[:(n+1)], Inp[:(n+1)]
@np.deprecate(message="scipy.special.sph_kn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_kn instead. "
"Note that the new function has a different signature.")
def sph_kn(n, z):
"""Compute spherical Bessel function kn(z) and derivative.
This function computes the value and first derivative of kn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of kn to compute
z : complex
Argument at which to evaluate
Returns
-------
kn : ndarray
Value of k0(z), ..., kn(z)
knp : ndarray
First derivative k0'(z), ..., kn'(z)
See also
--------
spherical_kn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, kn, knp = specfun.sphk(n1, z)
return kn[:(n+1)], knp[:(n+1)]
@np.deprecate(message="scipy.special.sph_inkn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_in and "
"scipy.special.spherical_kn instead. "
"Note that the new function has a different signature.")
def sph_inkn(n, z):
"""Compute spherical Bessel functions in(z), kn(z), and derivatives.
This function computes the value and first derivative of in(z) and kn(z)
for all orders up to and including n.
Parameters
----------
n : int
Maximum order of in and kn to compute
z : complex
Argument at which to evaluate
Returns
-------
in : ndarray
Value of i0(z), ..., in(z)
inp : ndarray
First derivative i0'(z), ..., in'(z)
kn : ndarray
Value of k0(z), ..., kn(z)
knp : ndarray
First derivative k0'(z), ..., kn'(z)
See also
--------
spherical_in
spherical_kn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, In, Inp = specfun.sphi(n1, z)
nm, kn, knp = specfun.sphk(n1, z)
return In[:(n+1)], Inp[:(n+1)], kn[:(n+1)], knp[:(n+1)]
def riccati_jn(n, x):
r"""Compute Ricatti-Bessel function of the first kind and its derivative.
The Riccati-Bessel function of the first kind is defined as :math:`x
j_n(x)`, where :math:`j_n` is the spherical Bessel function of the first
kind of order :math:`n`.
This function computes the value and first derivative of the
Riccati-Bessel function for all orders up to and including `n`.
Parameters
----------
n : int
Maximum order of function to compute
x : float
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(x), ..., jn(x)
jnp : ndarray
First derivative j0'(x), ..., jn'(x)
Notes
-----
The computation is carried out via backward recurrence, using the
relation DLMF 10.51.1 [2]_.
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.51.E1
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n == 0):
n1 = 1
else:
n1 = n
nm, jn, jnp = specfun.rctj(n1, x)
return jn[:(n+1)], jnp[:(n+1)]
def riccati_yn(n, x):
"""Compute Ricatti-Bessel function of the second kind and its derivative.
The Riccati-Bessel function of the second kind is defined as :math:`x
y_n(x)`, where :math:`y_n` is the spherical Bessel function of the second
kind of order :math:`n`.
This function computes the value and first derivative of the function for
all orders up to and including `n`.
Parameters
----------
n : int
Maximum order of function to compute
x : float
Argument at which to evaluate
Returns
-------
yn : ndarray
Value of y0(x), ..., yn(x)
ynp : ndarray
First derivative y0'(x), ..., yn'(x)
Notes
-----
The computation is carried out via ascending recurrence, using the
relation DLMF 10.51.1 [2]_.
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.51.E1
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n == 0):
n1 = 1
else:
n1 = n
nm, jn, jnp = specfun.rcty(n1, x)
return jn[:(n+1)], jnp[:(n+1)]
def erfinv(y):
"""Inverse function for erf.
"""
return ndtri((y+1)/2.0)/sqrt(2)
def erfcinv(y):
"""Inverse function for erfc.
"""
return -ndtri(0.5*y)/sqrt(2)
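# A minimal sketch, for illustration only (hypothetical helper, never called):
# the two definitions above express erfinv and erfcinv through ndtri, and they
# invert erf on (-1, 1) and erfc on (0, 2) respectively; erf and erfc are
# imported locally so the module-level imports stay untouched.
def _erfinv_sketch():
    from scipy.special import erf, erfc
    y = np.array([-0.5, 0.0, 0.9])
    ok_erf = np.allclose(erf(erfinv(y)), y)
    ok_erfc = np.allclose(erfc(erfcinv(y + 1.0)), y + 1.0)
    return ok_erf and ok_erfc  # True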
def erf_zeros(nt):
"""Compute nt complex zeros of error function erf(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.cerzo(nt)
def fresnelc_zeros(nt):
"""Compute nt complex zeros of cosine Fresnel integral C(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(1, nt)
def fresnels_zeros(nt):
"""Compute nt complex zeros of sine Fresnel integral S(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2, nt)
def fresnel_zeros(nt):
"""Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2, nt), specfun.fcszo(1, nt)
def assoc_laguerre(x, n, k=0.0):
"""Compute the generalized (associated) Laguerre polynomial of degree n and order k.
The polynomial :math:`L^{(k)}_n(x)` is orthogonal over ``[0, inf)``,
with weighting function ``exp(-x) * x**k`` for ``k > -1``.
Notes
-----
`assoc_laguerre` is a simple wrapper around `eval_genlaguerre`, with
reversed argument order ``(x, n, k=0.0) --> (n, k, x)``.
"""
return orthogonal.eval_genlaguerre(n, k, x)
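# A minimal sketch, for illustration only (hypothetical helper, never called):
# the generalized Laguerre polynomial of degree 1 is L_1^{(k)}(x) = k + 1 - x,
# so with the reversed (x, n, k) argument order assoc_laguerre(2.0, 1) gives
# 1 - 2 = -1 and assoc_laguerre(2.0, 1, k=1.0) gives 2 - 2 = 0.
def _assoc_laguerre_sketch():
    return assoc_laguerre(2.0, 1), assoc_laguerre(2.0, 1, k=1.0)  # (-1.0, 0.0)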
digamma = psi
def polygamma(n, x):
"""Polygamma function n.
This is the nth derivative of the digamma (psi) function.
Parameters
----------
n : array_like of int
The order of the derivative of `psi`.
x : array_like
Where to evaluate the polygamma function.
Returns
-------
polygamma : ndarray
The result.
Examples
--------
>>> from scipy import special
>>> x = [2, 3, 25.5]
>>> special.polygamma(1, x)
array([ 0.64493407, 0.39493407, 0.03999467])
>>> special.polygamma(0, x) == special.psi(x)
array([ True, True, True], dtype=bool)
"""
n, x = asarray(n), asarray(x)
fac2 = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1, x)
return where(n == 0, psi(x), fac2)
def mathieu_even_coef(m, q):
r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
The Fourier series of the even solutions of the Mathieu differential
equation are of the form
.. math:: \mathrm{ce}_{2n}(z, q) = \sum_{k=0}^{\infty} A_{(2n)}^{(2k)} \cos 2kz
.. math:: \mathrm{ce}_{2n+1}(z, q) = \sum_{k=0}^{\infty} A_{(2n+1)}^{(2k+1)} \cos (2k+1)z
This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even
input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input
m=2n+1.
Parameters
----------
m : int
Order of Mathieu functions. Must be non-negative.
q : float (>=0)
Parameter of Mathieu functions. Must be non-negative.
Returns
-------
Ak : ndarray
Even or odd Fourier coefficients, corresponding to even or odd m.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/28.4#i
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m < 0):
raise ValueError("m must be an integer >=0.")
if (q <= 1):
qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
else:
qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
km = int(qm + 0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 1
m = int(floor(m))
if m % 2:
kd = 2
a = mathieu_a(m, q)
fc = specfun.fcoef(kd, m, q, a)
return fc[:km]
def mathieu_odd_coef(m, q):
r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
The Fourier series of the odd solutions of the Mathieu differential
equation are of the form
.. math:: \mathrm{se}_{2n+1}(z, q) = \sum_{k=0}^{\infty} B_{(2n+1)}^{(2k+1)} \sin (2k+1)z
.. math:: \mathrm{se}_{2n+2}(z, q) = \sum_{k=0}^{\infty} B_{(2n+2)}^{(2k+2)} \sin (2k+2)z
This function returns the coefficients :math:`B_{(2n+2)}^{(2k+2)}` for even
input m=2n+2, and the coefficients :math:`B_{(2n+1)}^{(2k+1)}` for odd
input m=2n+1.
Parameters
----------
m : int
Order of Mathieu functions. Must be non-negative.
q : float (>=0)
Parameter of Mathieu functions. Must be non-negative.
Returns
-------
Bk : ndarray
Even or odd Fourier coefficients, corresponding to even or odd m.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m <= 0):
raise ValueError("m must be an integer > 0")
if (q <= 1):
qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
else:
qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
km = int(qm + 0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 4
m = int(floor(m))
if m % 2:
kd = 3
b = mathieu_b(m, q)
fc = specfun.fcoef(kd, m, q, b)
return fc[:km]
def lpmn(m, n, z):
"""Associated Legendre function of the first kind, Pmn(z).
Computes the associated Legendre function of the first kind of order m and
degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
This function takes a real argument ``z``. For complex arguments ``z``
use clpmn instead.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float
Input value.
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
See Also
--------
clpmn: associated Legendre functions of the first kind for complex z
Notes
-----
In the interval (-1, 1), Ferrer's function of the first kind is
returned. The phase convention used for the intervals (1, inf)
and (-inf, -1) is such that the result is always real.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/14.3
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if iscomplex(z):
raise ValueError("Argument must be real. Use clpmn instead.")
if (m < 0):
mp = -m
mf, nf = mgrid[0:mp+1, 0:n+1]
sv = errprint(0)
if abs(z) < 1:
# Ferrer function; DLMF 14.9.3
fixarr = where(mf > nf, 0.0,
(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
# Match to clpmn; DLMF 14.9.13
fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
sv = errprint(sv)
else:
mp = m
p, pd = specfun.lpmn(mp, n, z)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p, pd
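# A minimal sketch, for illustration only (hypothetical helper, never called):
# for order m = 0 the rows of lpmn reduce to the ordinary Legendre
# polynomials, e.g. at z = 0.5 degrees 0..2 give P0 = 1, P1 = 0.5 and
# P2 = (3*0.25 - 1)/2 = -0.125.
def _lpmn_sketch():
    p, pd = lpmn(0, 2, 0.5)
    return np.allclose(p[0], [1.0, 0.5, -0.125])  # True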
def clpmn(m, n, z, type=3):
"""Associated Legendre function of the first kind, Pmn(z).
Computes the associated Legendre function of the first kind of order m and
degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float or complex
Input value.
type : int, optional
takes values 2 or 3
2: cut on the real axis ``|x| > 1``
3: cut on the real axis ``-1 < x < 1`` (default)
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders ``0..m`` and degrees ``0..n``
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders ``0..m`` and degrees ``0..n``
See Also
--------
lpmn: associated Legendre functions of the first kind for real z
Notes
-----
By default, i.e. for ``type=3``, phase conventions are chosen according
to [1]_ such that the function is analytic. The cut lies on the interval
(-1, 1). Approaching the cut from above or below in general yields a phase
factor with respect to Ferrer's function of the first kind
(cf. `lpmn`).
For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values
on the interval (-1, 1) in the complex plane yields Ferrer's function
of the first kind.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/14.21
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if not(type == 2 or type == 3):
raise ValueError("type must be either 2 or 3.")
if (m < 0):
mp = -m
mf, nf = mgrid[0:mp+1, 0:n+1]
sv = errprint(0)
if type == 2:
fixarr = where(mf > nf, 0.0,
(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
sv = errprint(sv)
else:
mp = m
p, pd = specfun.clpmn(mp, n, real(z), imag(z), type)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p, pd
def lqmn(m, n, z):
"""Associated Legendre function of the second kind, Qmn(z).
Computes the associated Legendre function of the second kind of order m and
degree n, ``Qmn(z)`` = :math:`Q_n^m(z)`, and its derivative, ``Qmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Qmn(z)`` and
``Qmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : complex
Input value.
Returns
-------
Qmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Qmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(m) or (m < 0):
raise ValueError("m must be a non-negative integer.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
m = int(m)
n = int(n)
# Ensure neither m nor n == 0
mm = max(1, m)
nn = max(1, n)
if iscomplex(z):
q, qd = specfun.clqmn(mm, nn, z)
else:
q, qd = specfun.lqmn(mm, nn, z)
return q[:(m+1), :(n+1)], qd[:(m+1), :(n+1)]
def bernoulli(n):
"""Bernoulli numbers B0..Bn (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.bernob(int(n1))[:(n+1)]
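# A minimal sketch, for illustration only (hypothetical helper, never called):
# the first few Bernoulli numbers are B0 = 1, |B1| = 1/2, B2 = 1/6, B3 = 0
# and B4 = -1/30; the sign of B1 depends on the convention, so only its
# magnitude is checked here.
def _bernoulli_sketch():
    b = bernoulli(4)
    return np.allclose([b[0], abs(b[1]), b[2], b[3], b[4]],
                       [1.0, 0.5, 1.0 / 6, 0.0, -1.0 / 30])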
def euler(n):
"""Euler numbers E0..En (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.eulerb(n1)[:(n+1)]
def lpn(n, z):
"""Legendre functions of the first kind, Pn(z).
Compute sequence of Legendre functions of the first kind (polynomials),
Pn(z) and derivatives for all degrees from 0 to n (inclusive).
See also special.legendre for polynomial class.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
pn, pd = specfun.clpn(n1, z)
else:
pn, pd = specfun.lpn(n1, z)
return pn[:(n+1)], pd[:(n+1)]
def lqn(n, z):
"""Legendre functions of the second kind, Qn(z).
Compute sequence of Legendre functions of the second kind, Qn(z) and
derivatives for all degrees from 0 to n (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
qn, qd = specfun.clqn(n1, z)
else:
qn, qd = specfun.lqnb(n1, z)
return qn[:(n+1)], qd[:(n+1)]
def ai_zeros(nt):
"""
Compute `nt` zeros and values of the Airy function Ai and its derivative.
Computes the first `nt` zeros, `a`, of the Airy function Ai(x);
first `nt` zeros, `ap`, of the derivative of the Airy function Ai'(x);
the corresponding values Ai(a');
and the corresponding values Ai'(a).
Parameters
----------
nt : int
Number of zeros to compute
Returns
-------
a : ndarray
First `nt` zeros of Ai(x)
ap : ndarray
First `nt` zeros of Ai'(x)
ai : ndarray
Values of Ai(x) evaluated at first `nt` zeros of Ai'(x)
aip : ndarray
Values of Ai'(x) evaluated at first `nt` zeros of Ai(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
kf = 1
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt, kf)
def bi_zeros(nt):
"""
Compute `nt` zeros and values of the Airy function Bi and its derivative.
Computes the first `nt` zeros, b, of the Airy function Bi(x);
first `nt` zeros, b', of the derivative of the Airy function Bi'(x);
the corresponding values Bi(b');
and the corresponding values Bi'(b).
Parameters
----------
nt : int
Number of zeros to compute
Returns
-------
b : ndarray
First `nt` zeros of Bi(x)
bp : ndarray
First `nt` zeros of Bi'(x)
bi : ndarray
Values of Bi(x) evaluated at first `nt` zeros of Bi'(x)
bip : ndarray
Values of Bi'(x) evaluated at first `nt` zeros of Bi(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
kf = 2
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt, kf)
def lmbda(v, x):
r"""Jahnke-Emden Lambda function, Lambdav(x).
This function is defined as [2]_,
.. math:: \Lambda_v(x) = \Gamma(v+1) \frac{J_v(x)}{(x/2)^v},
where :math:`\Gamma` is the gamma function and :math:`J_v` is the
Bessel function of the first kind.
Parameters
----------
v : float
Order of the Lambda function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
vl : ndarray
Values of Lambda_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dl : ndarray
Derivatives Lambda_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] Jahnke, E. and Emde, F. "Tables of Functions with Formulae and
Curves" (4th ed.), Dover, 1945
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (v < 0):
        raise ValueError("v must be non-negative.")
n = int(v)
v0 = v - n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
if (v != floor(v)):
vm, vl, dl = specfun.lamv(v1, x)
else:
vm, vl, dl = specfun.lamn(v1, x)
return vl[:(n+1)], dl[:(n+1)]
def pbdv_seq(v, x):
"""Parabolic cylinder functions Dv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv, dp, pdf, pdd = specfun.pbdv(v1, x)
return dv[:n1+1], dp[:n1+1]
def pbvv_seq(v, x):
"""Parabolic cylinder functions Vv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n <= 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv, dp, pdf, pdd = specfun.pbvv(v1, x)
return dv[:n1+1], dp[:n1+1]
def pbdn_seq(n, z):
"""Parabolic cylinder functions Dn(z) and derivatives.
Parameters
----------
n : int
Order of the parabolic cylinder function
z : complex
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of D_i(z), for i=0, ..., i=n.
dp : ndarray
Derivatives D_i'(z), for i=0, ..., i=n.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (floor(n) != n):
raise ValueError("n must be an integer.")
if (abs(n) <= 1):
n1 = 1
else:
n1 = n
cpb, cpd = specfun.cpbdn(n1, z)
return cpb[:n1+1], cpd[:n1+1]
def ber_zeros(nt):
"""Compute nt zeros of the Kelvin function ber(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 1)
def bei_zeros(nt):
"""Compute nt zeros of the Kelvin function bei(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 2)
def ker_zeros(nt):
"""Compute nt zeros of the Kelvin function ker(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 3)
def kei_zeros(nt):
"""Compute nt zeros of the Kelvin function kei(x).
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 4)
def berp_zeros(nt):
"""Compute nt zeros of the Kelvin function ber'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 5)
def beip_zeros(nt):
"""Compute nt zeros of the Kelvin function bei'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 6)
def kerp_zeros(nt):
"""Compute nt zeros of the Kelvin function ker'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 7)
def keip_zeros(nt):
"""Compute nt zeros of the Kelvin function kei'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 8)
def kelvin_zeros(nt):
"""Compute nt zeros of all Kelvin functions.
Returned in a length-8 tuple of arrays of length nt. The tuple contains
the arrays of zeros of (ber, bei, ker, kei, ber', bei', ker', kei').
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return (specfun.klvnzo(nt, 1),
specfun.klvnzo(nt, 2),
specfun.klvnzo(nt, 3),
specfun.klvnzo(nt, 4),
specfun.klvnzo(nt, 5),
specfun.klvnzo(nt, 6),
specfun.klvnzo(nt, 7),
specfun.klvnzo(nt, 8))
def pro_cv_seq(m, n, c):
"""Characteristic values for prolate spheroidal wave functions.
Compute a sequence of characteristic values for the prolate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m, n, c, 1)[1][:maxL]
def obl_cv_seq(m, n, c):
"""Characteristic values for oblate spheroidal wave functions.
Compute a sequence of characteristic values for the oblate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m, n, c, -1)[1][:maxL]
def ellipk(m):
"""Complete elliptic integral of the first kind.
This function is defined as
.. math:: K(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
Parameters
----------
m : array_like
The parameter of the elliptic integral.
Returns
-------
K : array_like
Value of the elliptic integral.
Notes
-----
For more precision around point m = 1, use `ellipkm1`, which this
function calls.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind around m = 1
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
"""
return ellipkm1(1 - asarray(m))
def agm(a, b):
"""Arithmetic, Geometric Mean.
Start with a_0=a and b_0=b and iteratively compute
a_{n+1} = (a_n+b_n)/2
b_{n+1} = sqrt(a_n*b_n)
until a_n=b_n. The result is agm(a, b)
agm(a, b)=agm(b, a)
agm(a, a) = a
min(a, b) < agm(a, b) < max(a, b)
"""
s = a + b + 0.0
return (pi / 4) * s / ellipkm1(4 * a * b / s ** 2)
def comb(N, k, exact=False, repetition=False):
"""The number of combinations of N things taken k at a time.
This is often expressed as "N choose k".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
repetition : bool, optional
If `repetition` is True, then the number of combinations with
repetition is computed.
Returns
-------
val : int, ndarray
The total number of combinations.
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> from scipy.special import comb
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> comb(n, k, exact=False)
array([ 120., 210.])
>>> comb(10, 3, exact=True)
120L
>>> comb(10, 3, exact=True, repetition=True)
220L
"""
if repetition:
return comb(N + k - 1, k, exact)
if exact:
return _comb_int(N, k)
else:
k, N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = binom(N, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
def perm(N, k, exact=False):
"""Permutations of N things taken k at a time, i.e., k-permutations of N.
It's also known as "partial permutations".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
Returns
-------
val : int, ndarray
The number of k-permutations of N.
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> from scipy.special import perm
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> perm(n, k)
array([ 720., 5040.])
>>> perm(10, 3, exact=True)
720
"""
if exact:
if (k > N) or (N < 0) or (k < 0):
return 0
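        # Exact k-permutations: N! / (N - k)!, computed as the product (N-k+1) * ... * N.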
val = 1
for i in xrange(N - k + 1, N + 1):
val *= i
return val
else:
k, N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = poch(N - k + 1, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
# http://stackoverflow.com/a/16327037/125507
def _range_prod(lo, hi):
"""
Product of a range of numbers.
Returns the product of
lo * (lo+1) * (lo+2) * ... * (hi-2) * (hi-1) * hi
= hi! / (lo-1)!
Breaks into smaller products first for speed:
_range_prod(2, 9) = ((2*3)*(4*5))*((6*7)*(8*9))
"""
if lo + 1 < hi:
mid = (hi + lo) // 2
return _range_prod(lo, mid) * _range_prod(mid + 1, hi)
if lo == hi:
return lo
return lo * hi
def factorial(n, exact=False):
"""
The factorial of a number or array of numbers.
The factorial of non-negative integer `n` is the product of all
positive integers less than or equal to `n`::
n! = n * (n - 1) * (n - 2) * ... * 1
Parameters
----------
n : int or array_like of ints
Input values. If ``n < 0``, the return value is 0.
exact : bool, optional
If True, calculate the answer exactly using long integer arithmetic.
If False, result is approximated in floating point rapidly using the
`gamma` function.
Default is False.
Returns
-------
nf : float or int or ndarray
Factorial of `n`, as integer or float depending on `exact`.
Notes
-----
For arrays with ``exact=True``, the factorial is computed only once, for
the largest input, with each other result computed in the process.
The output dtype is increased to ``int64`` or ``object`` if necessary.
With ``exact=False`` the factorial is approximated using the gamma
function:
.. math:: n! = \Gamma(n+1)
Examples
--------
>>> from scipy.special import factorial
>>> arr = np.array([3, 4, 5])
>>> factorial(arr, exact=False)
array([ 6., 24., 120.])
>>> factorial(arr, exact=True)
array([ 6, 24, 120])
>>> factorial(5, exact=True)
120L
"""
if exact:
if np.ndim(n) == 0:
return 0 if n < 0 else math.factorial(n)
else:
n = asarray(n)
un = np.unique(n).astype(object)
# Convert to object array of long ints if np.int can't handle size
if un[-1] > 20:
dt = object
elif un[-1] > 12:
dt = np.int64
else:
dt = np.int
out = np.empty_like(n, dtype=dt)
# Handle invalid/trivial values
un = un[un > 1]
out[n < 2] = 1
out[n < 0] = 0
# Calculate products of each range of numbers
if un.size:
val = math.factorial(un[0])
out[n == un[0]] = val
for i in xrange(len(un) - 1):
prev = un[i] + 1
current = un[i + 1]
val *= _range_prod(prev, current)
out[n == current] = val
return out
else:
n = asarray(n)
vals = gamma(n + 1)
return where(n >= 0, vals, 0)
def factorial2(n, exact=False):
"""Double factorial.
This is the factorial with every second value skipped. E.g., ``7!! = 7 * 5
* 3 * 1``. It can be approximated numerically as::
      n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi) n odd
= 2**(n/2) * (n/2)! n even
Parameters
----------
n : int or array_like
Calculate ``n!!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above (default). If `exact` is set to True, calculate the
answer exactly using integer arithmetic.
Returns
-------
nff : float or int
Double factorial of `n`, as an int or a float depending on
`exact`.
Examples
--------
>>> from scipy.special import factorial2
>>> factorial2(7, exact=False)
array(105.00000000000001)
>>> factorial2(7, exact=True)
105L
"""
if exact:
if n < -1:
return 0
if n <= 0:
return 1
val = 1
for k in xrange(n, 0, -2):
val *= k
return val
else:
n = asarray(n)
vals = zeros(n.shape, 'd')
cond1 = (n % 2) & (n >= -1)
cond2 = (1-(n % 2)) & (n >= -1)
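        # cond1 selects odd n (including -1), cond2 even n; each branch applies
        # the corresponding closed form from the docstring.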
oddn = extract(cond1, n)
evenn = extract(cond2, n)
nd2o = oddn / 2.0
nd2e = evenn / 2.0
place(vals, cond1, gamma(nd2o + 1) / sqrt(pi) * pow(2.0, nd2o + 0.5))
place(vals, cond2, gamma(nd2e + 1) * pow(2.0, nd2e))
return vals
def factorialk(n, k, exact=True):
"""Multifactorial of n of order k, n(!!...!).
This is the multifactorial of n skipping k values. For example,
factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1
In particular, for any integer ``n``, we have
factorialk(n, 1) = factorial(n)
factorialk(n, 2) = factorial2(n)
Parameters
----------
n : int
Calculate multifactorial. If `n` < 0, the return value is 0.
k : int
Order of multifactorial.
exact : bool, optional
If exact is set to True, calculate the answer exactly using
integer arithmetic.
Returns
-------
val : int
Multifactorial of `n`.
Raises
------
NotImplementedError
Raises when exact is False
Examples
--------
>>> from scipy.special import factorialk
>>> factorialk(5, 1, exact=True)
120L
>>> factorialk(5, 3, exact=True)
10L
"""
if exact:
if n < 1-k:
return 0
if n <= 0:
return 1
val = 1
for j in xrange(n, 0, -k):
val = val*j
return val
else:
raise NotImplementedError
def zeta(x, q=None, out=None):
r"""
Riemann zeta function.
The two-argument version is the Hurwitz zeta function:
.. math:: \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x},
    The Riemann zeta function corresponds to ``q = 1``.
See also
--------
zetac
"""
if q is None:
q = 1
return _zeta(x, q, out)
| bsd-3-clause |
habibutsu/research | python/pyaudio/filter.py | 1 | 1858 | from __future__ import print_function, division, unicode_literals
import wave
import numpy as np
# compatibility with Python 3
import matplotlib.pyplot as plt
def plot_data(num, data, ampl):
plt.figure(1)
a = plt.subplot(211)
r = 2**16/2
a.set_ylim([-r, r])
a.set_xlabel('time [s]')
a.set_ylabel('sample value [-]')
x = np.arange(44100)/44100
print(x)
print(data)
plt.plot(x, data)
b = plt.subplot(212)
b.set_xscale('log')
b.set_xlabel('frequency [Hz]')
b.set_ylabel('|amplitude|')
plt.plot(abs(ampl))
plt.savefig('sample-graph_%s.png' % num)
# Created input file with:
# mpg123 -w 20130509talk.wav 20130509talk.mp3
wr = wave.open('recorded.wav', 'r')
par = list(wr.getparams()) # Get the parameters from the input.
# This file is stereo, 2 bytes/sample, 44.1 kHz.
par[3] = 0 # The number of samples will be set by writeframes.
# Open the output file
ww = wave.open('recorded-filtered.wav', 'w')
ww.setparams(tuple(par)) # Use the same parameters as the input file.
lowpass = 21 # Remove lower frequencies.
highpass = 9000 # Remove higher frequencies.
sz = wr.getframerate() # Read and process 1 second at a time.
c = int(wr.getnframes()/sz) # whole file
for num in range(c):
print('Processing {}/{} s'.format(num+1, c))
    da = np.frombuffer(wr.readframes(sz), dtype=np.int16)
left, right = da[0::2], da[1::2] # left and right channel
lf, rf = np.fft.rfft(left), np.fft.rfft(right)
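    # Each chunk is 1 second at 44.1 kHz, so rfft bin k corresponds to k Hz and
    # the slice indices below can be read directly as frequencies.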
plot_data(num, left, lf)
lf[:lowpass], rf[:lowpass] = 0, 0 # low pass filter
lf[55:66], rf[55:66] = 0, 0 # line noise
lf[highpass:], rf[highpass:] = 0,0 # high pass filter
nl, nr = np.fft.irfft(lf), np.fft.irfft(rf)
ns = np.column_stack((nl,nr)).ravel().astype(np.int16)
ns2 = ns * 2
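    # Doubling the int16 samples can overflow and wrap; clipping to the int16
    # range first would be safer.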
    ww.writeframes(ns2.tobytes())
# Close the files.
wr.close()
ww.close() | mit |
Titan-C/scikit-learn | examples/model_selection/plot_grid_search_digits.py | 56 | 2761 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.model_selection.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, turning
# the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
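# Note: 'gamma' only matters for the RBF kernel here; the linear-kernel grid varies only C.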
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(), tuned_parameters, cv=5,
scoring='%s_macro' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
Naereen/notebooks | NetHack's functions Rne, Rn2 and Rnz in Python 3.py | 1 | 5169 |
# coding: utf-8
# # Table of Contents
# <p><div class="lev1 toc-item"><a href="#NetHack's-functions-Rne,-Rn2-and-Rnz-in-Python-3" data-toc-modified-id="NetHack's-functions-Rne,-Rn2-and-Rnz-in-Python-3-1"><span class="toc-item-num">1 </span>NetHack's functions Rne, Rn2 and Rnz in Python 3</a></div><div class="lev2 toc-item"><a href="#Rn2-distribution" data-toc-modified-id="Rn2-distribution-11"><span class="toc-item-num">1.1 </span><code>Rn2</code> distribution</a></div><div class="lev2 toc-item"><a href="#Rne-distribution" data-toc-modified-id="Rne-distribution-12"><span class="toc-item-num">1.2 </span><code>Rne</code> distribution</a></div><div class="lev2 toc-item"><a href="#Rnz-distribution" data-toc-modified-id="Rnz-distribution-13"><span class="toc-item-num">1.3 </span><code>Rnz</code> distribution</a></div><div class="lev2 toc-item"><a href="#Examples" data-toc-modified-id="Examples-14"><span class="toc-item-num">1.4 </span>Examples</a></div><div class="lev3 toc-item"><a href="#For-x=350" data-toc-modified-id="For-x=350-141"><span class="toc-item-num">1.4.1 </span>For <code>x=350</code></a></div><div class="lev2 toc-item"><a href="#Conclusion" data-toc-modified-id="Conclusion-15"><span class="toc-item-num">1.5 </span>Conclusion</a></div>
# # NetHack's functions Rne, Rn2 and Rnz in Python 3
#
# I liked [this blog post](https://eev.ee/blog/2018/01/02/random-with-care/#beware-gauss) by [Eevee](https://eev.ee/blog/).
# He wrote about interesting things regarding random distributions, and linked to [this page](https://nethackwiki.com/wiki/Rnz) which describes a weird distribution implemented as `Rnz` in the [NetHack](https://www.nethack.org/) game.
#
# > Note: I never heard of any of those before today.
#
# I wanted to implement and experiment with the `Rnz` distribution myself.
# Its code ([see here](https://nethackwiki.com/wiki/Source:NetHack_3.6.0/src/rnd.c#rnz)) uses two other distributions, `Rne` and `Rn2`.
# In[41]:
get_ipython().run_line_magic('load_ext', 'watermark')
get_ipython().run_line_magic('watermark', '-v -m -p numpy,matplotlib')
# In[39]:
import random
import numpy as np
import matplotlib.pyplot as plt
# ## `Rn2` distribution
#
# [The `Rn2` distribution](https://nethackwiki.com/wiki/Rn2) is simply an integer uniform distribution, between $0$ and $x-1$.
# In[19]:
def rn2(x):
return random.randint(0, x-1)
# In[20]:
np.asarray([rn2(10) for _ in range(100)])
# Testing for `rn2(x) == 0` gives a $1/x$ probability :
# In[32]:
from collections import Counter
# In[35]:
Counter([rn2(10) == 0 for _ in range(100)])
# In[36]:
Counter([rn2(10) == 0 for _ in range(1000)])
# In[37]:
Counter([rn2(10) == 0 for _ in range(10000)])
# ## `Rne` distribution
#
# The `Rne` distribution is a truncated geometric distribution.
# In[88]:
def rne(x, truncation=5):
truncation = max(truncation, 1)
tmp = 1
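    # Each rn2(x) == 0 check succeeds with probability 1/x, so tmp follows a
    # geometric distribution truncated at `truncation`.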
while tmp < truncation and rn2(x) == 0:
tmp += 1
return tmp
# > In the NetHack game, the player's experience is used as the default value of the `truncation` parameter...
# In[89]:
np.asarray([rne(3) for _ in range(50)])
# In[90]:
plt.hist(np.asarray([rne(3) for _ in range(10000)]), bins=5)
# In[91]:
np.asarray([rne(4, truncation=10) for _ in range(50)])
# In[92]:
plt.hist(np.asarray([rne(4, truncation=10) for _ in range(10000)]), bins=10)
# Let's check what [this page](https://nethackwiki.com/wiki/Rnz#Probability_density_function) says about `rne(4)`:
#
# > The rne(4) call returns an integer from 1 to 5, with the following probabilities:
# >
# > |Number| Probability |
# > |:-----|------------:|
# > | 1 | 3/4 |
# > | 2 | 3/16 |
# > | 3 | 3/64 |
# > | 4 | 3/256 |
# > | 5 | 1/256 |
# In[96]:
ref_table = {1: 3/4, 2: 3/16, 3: 3/64, 4: 3/256, 5: 1/256}
ref_table
# In[99]:
N = 100000
table = Counter([rne(4, truncation=5) for _ in range(N)])
for k in table:
table[k] /= N
table = dict(table)
table
# In[111]:
rel_diff = lambda x, y: abs(x - y) / x
for k in ref_table:
x, y = ref_table[k], table[k]
r = rel_diff(x, y)
print(f"For k={k}: relative difference is {r:.3g} between {x:.3g} (expectation) and {y:.3g} (with N={N} samples).")
# > Seems true!
# ## `Rnz` distribution
#
# It's not too hard to write.
# In[112]:
def rnz(i, truncation=10):
x = i
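    # tmp is uniform on [1000, 1999] times the truncated-geometric rne(4);
    # a coin flip (rn2(2)) then decides whether x is scaled up or down by tmp/1000.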
tmp = 1000
tmp += rn2(1000)
tmp *= rne(4, truncation=truncation)
flip = rn2(2)
if flip:
x *= tmp
x /= 1000
else:
x *= 1000
x /= tmp
return int(x)
# ## Examples
# In[113]:
np.asarray([rnz(3) for _ in range(100)])
# In[114]:
np.asarray([rnz(3, truncation=10) for _ in range(100)])
# ### For `x=350`
# In[115]:
np.asarray([rnz(350) for _ in range(100)])
# In[122]:
_ = plt.hist(np.asarray([rnz(350) for _ in range(100000)]), bins=200)
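# As a rough sanity check, the median of `rnz(350)` should land near 350 itself:
# the fair coin flip inside `rnz` scales `i` up or down by the same factor
# `tmp/1000`, so the output is roughly symmetric around `i` on a log scale.
# In[ ]:
np.median(np.asarray([rnz(350) for _ in range(100000)]))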
# In[78]:
np.asarray([rnz(350, truncation=10) for _ in range(100)])
# In[120]:
_ = plt.hist(np.asarray([rnz(350, truncation=10) for _ in range(10000)]), bins=200)
# ## Conclusion
# That's it, not so interesting but I wanted to write this.
| mit |
oysstu/pyxtf | setup.py | 1 | 1544 | from os import path
from setuptools import setup
from tools.generate_pyi import generate_pyi
def main():
# Generate .pyi files
import pyxtf.xtf_ctypes
generate_pyi(pyxtf.xtf_ctypes)
import pyxtf.vendors.kongsberg
generate_pyi(pyxtf.vendors.kongsberg)
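    # The generated .pyi stubs are shipped with the package via package_data below.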
# read the contents of README file
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Run setup script
setup(name='pyxtf',
version='1.2',
description='eXtended Triton Format (XTF) file interface',
long_description=long_description,
long_description_content_type='text/markdown',
author='Oystein Sture',
author_email='[email protected]',
url='https://github.com/oysstu/pyxtf',
license='MIT',
setup_requires=['numpy>=1.11'],
install_requires=['numpy>=1.11', 'matplotlib>=1.5.1'],
packages=['pyxtf', 'pyxtf.vendors'],
package_data={'': ['*.pyi']},
use_2to3=False,
classifiers=[
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Intended Audience :: Other Audience',
'Intended Audience :: Science/Research',
'Natural Language :: English',
'Topic :: Scientific/Engineering',
'Programming Language :: Python :: 3 :: Only'
])
if __name__ == '__main__':
main()
| mit |
aflaxman/scikit-learn | sklearn/__init__.py | 9 | 5274 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
import os
from contextlib import contextmanager as _contextmanager
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
_ASSUME_FINITE = bool(os.environ.get('SKLEARN_ASSUME_FINITE', False))
def get_config():
"""Retrieve current values for configuration set by :func:`set_config`
Returns
-------
config : dict
Keys are parameter names that can be passed to :func:`set_config`.
"""
return {'assume_finite': _ASSUME_FINITE}
def set_config(assume_finite=None):
"""Set global scikit-learn configuration
Parameters
----------
assume_finite : bool, optional
If True, validation for finiteness will be skipped,
saving time, but leading to potential crashes. If
False, validation for finiteness will be performed,
avoiding error.
"""
global _ASSUME_FINITE
if assume_finite is not None:
_ASSUME_FINITE = assume_finite
@_contextmanager
def config_context(**new_config):
"""Context manager for global scikit-learn configuration
Parameters
----------
assume_finite : bool, optional
If True, validation for finiteness will be skipped,
saving time, but leading to potential crashes. If
False, validation for finiteness will be performed,
avoiding error.
Notes
-----
All settings, not just those presently modified, will be returned to
their previous values when the context manager is exited. This is not
thread-safe.
Examples
--------
>>> import sklearn
>>> from sklearn.utils.validation import assert_all_finite
>>> with sklearn.config_context(assume_finite=True):
... assert_all_finite([float('nan')])
>>> with sklearn.config_context(assume_finite=True):
... with sklearn.config_context(assume_finite=False):
... assert_all_finite([float('nan')])
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Input contains NaN, ...
"""
old_config = get_config().copy()
set_config(**new_config)
try:
yield
finally:
set_config(**old_config)
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module=r'^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.20.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'exceptions', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'learning_curve', 'linear_model', 'manifold', 'metrics',
'mixture', 'model_selection', 'multiclass', 'multioutput',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'random_projection', 'semi_supervised',
'svm', 'tree', 'discriminant_analysis',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| bsd-3-clause |
blaze/hdfs3 | hdfs3/core.py | 3 | 34058 | # -*- coding: utf-8 -*-
"Main module defining filesystem and file classes"
from __future__ import absolute_import
import ctypes
import logging
import os
import posixpath
import re
import warnings
import operator
import functools
from collections import deque
from .compatibility import FileNotFoundError, ConnectionError, PY3
from .conf import conf
from .utils import (read_block, seek_delimiter, ensure_bytes, ensure_string,
ensure_trailing_slash, MyNone)
logger = logging.getLogger(__name__)
_lib = None
DEFAULT_READ_BUFFER_SIZE = 2 ** 16
DEFAULT_WRITE_BUFFER_SIZE = 2 ** 26
def _nbytes(buf):
buf = memoryview(buf)
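    # memoryview.nbytes is not available on Python 2, so fall back to
    # itemsize * product(shape) there.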
if PY3:
return buf.nbytes
return buf.itemsize * functools.reduce(operator.mul, buf.shape)
class HDFileSystem(object):
""" Connection to an HDFS namenode
>>> hdfs = HDFileSystem(host='127.0.0.1', port=8020) # doctest: +SKIP
"""
_first_pid = None
def __init__(self, host=MyNone, port=MyNone, connect=True, autoconf=True,
pars=None, **kwargs):
"""
Parameters
----------
host: str; port: int
Overrides which take precedence over information in conf files and
other passed parameters
connect: bool (True)
Whether to automatically attempt to establish a connection to the
name-node.
autoconf: bool (True)
Whether to use the configuration found in the conf module as
the set of defaults
pars : {str: str}
any parameters for hadoop, that you can find in hdfs-site.xml,
https://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/hdfs-default.xml
This dict looks exactly like the one produced by conf - you can,
for example, remove any problematic entries.
kwargs: key/value
Further override parameters.
These are applied after the default conf and pars; the most typical
things to set are:
host : str (localhost)
                namenode hostname or IP address; in HA mode, the name of the
                cluster, as found in the "fs.defaultFS" option.
            port : int (8020)
                namenode RPC port, usually 8020; in HA mode the port must be None
user, ticket_cache, token, effective_user : str
kerberos things
"""
self.conf = conf.copy() if autoconf else {}
if pars:
self.conf.update(pars)
self.conf.update(kwargs)
if host is not MyNone:
self.conf['host'] = host
if port is not MyNone:
self.conf['port'] = port
self._handle = None
if self.conf.get('ticket_cache') and self.conf.get('token'):
m = "It is not possible to use ticket_cache and token at same time"
raise RuntimeError(m)
if connect:
self.connect()
def __getstate__(self):
d = self.__dict__.copy()
del d['_handle']
logger.debug("Serialize with state: %s", d)
return d
def __setstate__(self, state):
self.__dict__.update(state)
self._handle = None
self.connect()
def connect(self):
""" Connect to the name node
This happens automatically at startup
"""
get_lib()
conf = self.conf.copy()
if self._handle:
return
if HDFileSystem._first_pid is None:
HDFileSystem._first_pid = os.getpid()
elif HDFileSystem._first_pid != os.getpid():
warnings.warn("Attempting to re-use hdfs3 in child process %d, "
"but it was initialized in parent process %d. "
"Beware that hdfs3 is not fork-safe and this may "
"lead to bugs or crashes."
% (os.getpid(), HDFileSystem._first_pid),
RuntimeWarning, stacklevel=2)
o = _lib.hdfsNewBuilder()
_lib.hdfsBuilderSetNameNode(o, ensure_bytes(conf.pop('host')))
port = conf.pop('port', None)
if port is not None:
_lib.hdfsBuilderSetNameNodePort(o, port)
user = conf.pop('user', None)
if user is not None:
_lib.hdfsBuilderSetUserName(o, ensure_bytes(user))
effective_user = ensure_bytes(conf.pop('effective_user', None))
ticket_cache = conf.pop('ticket_cache', None)
if ticket_cache is not None:
_lib.hdfsBuilderSetKerbTicketCachePath(o, ensure_bytes(ticket_cache))
token = conf.pop('token', None)
if token is not None:
_lib.hdfsBuilderSetToken(o, ensure_bytes(token))
for par, val in conf.items():
if not _lib.hdfsBuilderConfSetStr(o, ensure_bytes(par),
ensure_bytes(val)) == 0:
warnings.warn('Setting conf parameter %s failed' % par)
fs = _lib.hdfsBuilderConnect(o, effective_user)
_lib.hdfsFreeBuilder(o)
if fs:
logger.debug("Connect to handle %d", fs.contents.filesystem)
self._handle = fs
else:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise ConnectionError('Connection Failed: {}'.format(msg))
def delegate_token(self, user=None):
"""Generate delegate auth token.
Parameters
----------
user: bytes/str
User to pass to delegation (defaults to user supplied to instance);
this user is the only one that can renew the token.
"""
if user is None and self.user is None:
raise ValueError('Delegation requires a user')
user = user or self.user
out = _lib.hdfsGetDelegationToken(self._handle, ensure_bytes(user))
if out:
self.token = out
return out
else:
raise RuntimeError('Token delegation failed')
def renew_token(self, token=None):
"""
Renew delegation token
Parameters
----------
token: str or None
If None, uses the instance's token. It is an error to do that if
there is no token.
Returns
-------
New expiration time for the token
"""
token = token or self.token
if token is None:
raise ValueError('There is no token to renew')
return _lib.hdfsRenewDelegationToken(self._handle, ensure_bytes(token))
def cancel_token(self, token=None):
"""
Revoke delegation token
Parameters
----------
token: str or None
If None, uses the instance's token. It is an error to do that if
there is no token.
"""
token = token or self.token
if token is None:
raise ValueError('There is no token to cancel')
out = _lib.hdfsCancelDelegationToken(self._handle, ensure_bytes(token))
if out:
raise RuntimeError('Token cancel failed')
if token == self.token:
# now our token is invalid - this FS may not work
self.token = None
def disconnect(self):
""" Disconnect from name node """
if self._handle:
logger.debug("Disconnect from handle %d",
self._handle.contents.filesystem)
_lib.hdfsDisconnect(self._handle)
self._handle = None
def open(self, path, mode='rb', replication=0, buff=0, block_size=0):
""" Open a file for reading or writing
Parameters
----------
path: string
Path of file on HDFS
mode: string
One of 'rb', 'wb', or 'ab'
replication: int
Replication factor; if zero, use system default (only on write)
        buff: int (=0)
Client buffer size (bytes); if 0, use default.
block_size: int
Size of data-node blocks if writing
"""
if not self._handle:
raise IOError("Filesystem not connected")
if block_size and mode != 'wb':
raise ValueError('Block size only valid when writing new file')
if ('a' in mode and self.exists(path) and
replication != 0 and replication > 1):
raise IOError("Appending to an existing file with replication > 1"
" is unsupported")
if 'b' not in mode:
raise NotImplementedError("Text mode not supported, use mode='%s'"
" and manage bytes" % (mode + 'b'))
return HDFile(self, path, mode, replication=replication, buff=buff,
block_size=block_size)
def du(self, path, total=False, deep=False):
"""Returns file sizes on a path.
Parameters
----------
path : string
where to look
total : bool (False)
to add up the sizes to a grand total
deep : bool (False)
whether to recurse into subdirectories
"""
fi = self.ls(path, True)
if deep:
for apath in fi:
if apath['kind'] == 'directory':
fi.extend(self.ls(apath['name'], True))
if total:
return {path: sum(f['size'] for f in fi)}
return {p['name']: p['size'] for p in fi}
def df(self):
""" Used/free disc space on the HDFS system """
cap = _lib.hdfsGetCapacity(self._handle)
used = _lib.hdfsGetUsed(self._handle)
return {'capacity': cap,
'used': used,
'percent-free': 100 * (cap - used) / cap}
def get_block_locations(self, path, start=0, length=0):
""" Fetch physical locations of blocks """
if not self._handle:
raise IOError("Filesystem not connected")
start = int(start) or 0
length = int(length) or self.info(path)['size']
nblocks = ctypes.c_int(0)
out = _lib.hdfsGetFileBlockLocations(self._handle,
ensure_bytes(path),
ctypes.c_int64(start),
ctypes.c_int64(length),
ctypes.byref(nblocks))
locs = []
for i in range(nblocks.value):
block = out[i]
hosts = [block.hosts[i] for i in
range(block.numOfNodes)]
locs.append({'hosts': hosts, 'length': block.length,
'offset': block.offset})
_lib.hdfsFreeFileBlockLocations(out, nblocks)
return locs
def info(self, path):
""" File information (as a dict) """
if not self.exists(path):
raise FileNotFoundError(path)
fi = _lib.hdfsGetPathInfo(self._handle, ensure_bytes(path)).contents
out = fi.to_dict()
_lib.hdfsFreeFileInfo(ctypes.byref(fi), 1)
return out
def isdir(self, path):
"""Return True if path refers to an existing directory."""
try:
info = self.info(path)
return info['kind'] == 'directory'
except EnvironmentError:
return False
def isfile(self, path):
"""Return True if path refers to an existing file."""
try:
info = self.info(path)
return info['kind'] == 'file'
except EnvironmentError:
return False
def walk(self, path):
"""Directory tree generator, see ``os.walk``"""
full_dirs = []
dirs = []
files = []
for info in self.ls(path, True):
name = info['name']
tail = posixpath.split(name)[1]
if info['kind'] == 'directory':
full_dirs.append(name)
dirs.append(tail)
else:
files.append(tail)
yield path, dirs, files
for d in full_dirs:
for res in self.walk(d):
yield res
def glob(self, path):
""" Get list of paths mathing glob-like pattern (i.e., with "*"s).
If passed a directory, gets all contained files; if passed path
to a file, without any "*", returns one-element list containing that
filename. Does not support python3.5's "**" notation.
"""
path = ensure_string(path)
try:
f = self.info(path)
if f['kind'] == 'directory' and '*' not in path:
path = ensure_trailing_slash(path) + '*'
else:
return [f['name']]
except IOError:
pass
if '/' in path[:path.index('*')]:
ind = path[:path.index('*')].rindex('/')
root = path[:ind + 1]
else:
root = '/'
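        # Walk everything under the deepest non-wildcard prefix and filter the
        # results with a regex built from the glob pattern.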
allpaths = []
for dirname, dirs, fils in self.walk(root):
allpaths.extend(posixpath.join(dirname, d) for d in dirs)
allpaths.extend(posixpath.join(dirname, f) for f in fils)
pattern = re.compile("^" + path.replace('//', '/')
.rstrip('/')
.replace('*', '[^/]*')
.replace('?', '.') + "$")
return [p for p in allpaths
if pattern.match(p.replace('//', '/').rstrip('/'))]
def ls(self, path, detail=False):
""" List files at path
Parameters
----------
path : string/bytes
location at which to list files
        detail : bool (=False)
if True, each list item is a dict of file properties;
otherwise, returns list of filenames
"""
if not self.exists(path):
raise FileNotFoundError(path)
num = ctypes.c_int(0)
fi = _lib.hdfsListDirectory(self._handle, ensure_bytes(path),
ctypes.byref(num))
out = [fi[i].to_dict() for i in range(num.value)]
_lib.hdfsFreeFileInfo(fi, num.value)
if detail:
return out
else:
return [o['name'] for o in out]
@property
def host(self):
return self.conf.get('host', '')
@property
def port(self):
return self.conf.get('port', '')
def __repr__(self):
if self._handle is None:
state = 'Disconnected'
else:
state = 'Connected'
return 'hdfs://%s:%s, %s' % (self.host, self.port, state)
def __del__(self):
if self._handle:
self.disconnect()
def mkdir(self, path):
""" Make directory at path """
out = _lib.hdfsCreateDirectory(self._handle, ensure_bytes(path))
if out != 0:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError('Create directory failed: {}'.format(msg))
def makedirs(self, path, mode=0o711):
""" Create directory together with any necessary intermediates """
out = _lib.hdfsCreateDirectoryEx(self._handle, ensure_bytes(path),
ctypes.c_short(mode), 1)
if out != 0:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError('Create directory failed: {}'.format(msg))
def set_replication(self, path, replication):
""" Instruct HDFS to set the replication for the given file.
If successful, the head-node's table is updated immediately, but
actual copying will be queued for later. It is acceptable to set
a replication that cannot be supported (e.g., higher than the
number of data-nodes).
"""
if replication < 0:
raise ValueError('Replication must be positive,'
' or 0 for system default')
out = _lib.hdfsSetReplication(self._handle, ensure_bytes(path),
ctypes.c_int16(int(replication)))
if out != 0:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError('Set replication failed: {}'.format(msg))
def mv(self, path1, path2):
""" Move file at path1 to path2 """
if not self.exists(path1):
raise FileNotFoundError(path1)
out = _lib.hdfsRename(self._handle, ensure_bytes(path1),
ensure_bytes(path2))
return out == 0
def concat(self, destination, paths):
"""Concatenate inputs to destination
Source files *should* all have the same block size and replication.
The destination file must be in the same directory as
the source files. If the target exists, it will be appended to.
Some HDFSs impose that the target file must exist and be an exact
number of blocks long, and that each concated file except the last
is also a whole number of blocks.
The source files are deleted on successful
completion.
"""
if not self.exists(destination):
self.touch(destination)
arr = (ctypes.c_char_p * (len(paths) + 1))()
arr[:-1] = [ensure_bytes(s) for s in paths]
arr[-1] = ctypes.c_char_p() # NULL pointer
out = _lib.hdfsConcat(self._handle, ensure_bytes(destination), arr)
if out != 0:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError('Concat failed on %s %s' % (destination, msg))
def rm(self, path, recursive=True):
"Use recursive for `rm -r`, i.e., delete directory and contents"
if not self.exists(path):
raise FileNotFoundError(path)
out = _lib.hdfsDelete(self._handle, ensure_bytes(path), bool(recursive))
if out != 0:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError('Remove failed on %s %s' % (path, msg))
def exists(self, path):
""" Is there an entry at path? """
out = _lib.hdfsExists(self._handle, ensure_bytes(path))
return out == 0
def chmod(self, path, mode):
"""Change access control of given path
Exactly what permissions the file will get depends on HDFS
configurations.
Parameters
----------
path : string
file/directory to change
mode : integer
As with the POSIX standard, each octal digit refers to
user-group-all, in that order, with read-write-execute as the
bits of each group.
Examples
--------
Make read/writeable to all
>>> hdfs.chmod('/path/to/file', 0o777) # doctest: +SKIP
Make read/writeable only to user
>>> hdfs.chmod('/path/to/file', 0o700) # doctest: +SKIP
Make read-only to user
>>> hdfs.chmod('/path/to/file', 0o100) # doctest: +SKIP
"""
if not self.exists(path):
raise FileNotFoundError(path)
out = _lib.hdfsChmod(self._handle, ensure_bytes(path),
ctypes.c_short(mode))
if out != 0:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError("chmod failed on %s %s" % (path, msg))
def chown(self, path, owner, group):
""" Change owner/group """
if not self.exists(path):
raise FileNotFoundError(path)
out = _lib.hdfsChown(self._handle, ensure_bytes(path),
ensure_bytes(owner), ensure_bytes(group))
if out != 0:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError("chown failed on %s %s" % (path, msg))
def cat(self, path):
""" Return contents of file """
if not self.exists(path):
raise FileNotFoundError(path)
with self.open(path, 'rb') as f:
result = f.read()
return result
def get(self, hdfs_path, local_path, blocksize=DEFAULT_READ_BUFFER_SIZE):
""" Copy HDFS file to local """
# TODO: _lib.hdfsCopy() may do this more efficiently
if not self.exists(hdfs_path):
raise FileNotFoundError(hdfs_path)
with self.open(hdfs_path, 'rb') as f:
with open(local_path, 'wb') as f2:
out = 1
while out:
out = f.read(blocksize)
f2.write(out)
def getmerge(self, path, filename, blocksize=DEFAULT_READ_BUFFER_SIZE):
""" Concat all files in path (a directory) to local output file """
files = self.ls(path)
with open(filename, 'wb') as f2:
for apath in files:
with self.open(apath, 'rb') as f:
out = 1
while out:
out = f.read(blocksize)
f2.write(out)
def put(self, filename, path, chunk=DEFAULT_WRITE_BUFFER_SIZE, replication=0, block_size=0):
""" Copy local file to path in HDFS """
with self.open(path, 'wb', replication=replication,
block_size=block_size) as target:
with open(filename, 'rb') as source:
while True:
out = source.read(chunk)
if len(out) == 0:
break
target.write(out)
def tail(self, path, size=1024):
""" Return last bytes of file """
length = self.du(path)[ensure_trailing_slash(path)]
if size > length:
return self.cat(path)
with self.open(path, 'rb') as f:
f.seek(length - size)
return f.read(size)
def head(self, path, size=1024):
""" Return first bytes of file """
with self.open(path, 'rb') as f:
return f.read(size)
def touch(self, path):
""" Create zero-length file """
self.open(path, 'wb').close()
def read_block(self, fn, offset, length, delimiter=None):
""" Read a block of bytes from an HDFS file
Starting at ``offset`` of the file, read ``length`` bytes. If
``delimiter`` is set then we ensure that the read starts and stops at
delimiter boundaries that follow the locations ``offset`` and ``offset
+ length``. If ``offset`` is zero then we start at zero. The
bytestring returned will not include the surrounding delimiter strings.
If offset+length is beyond the eof, reads to eof.
Parameters
----------
fn: string
Path to filename on HDFS
offset: int
Byte offset to start read
length: int
Number of bytes to read
delimiter: bytes (optional)
Ensure reading starts and stops at delimiter bytestring
Examples
--------
>>> hdfs.read_block('/data/file.csv', 0, 13) # doctest: +SKIP
b'Alice, 100\\nBo'
>>> hdfs.read_block('/data/file.csv', 0, 13, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\nBob, 200'
See Also
--------
hdfs3.utils.read_block
"""
with self.open(fn, 'rb') as f:
size = f.info()['size']
if offset + length > size:
length = size - offset
bytes = read_block(f, offset, length, delimiter)
return bytes
def list_encryption_zones(self):
"""Get list of all the encryption zones"""
x = ctypes.c_int(8)
out = _lib.hdfsListEncryptionZones(self._handle, x)
if not out:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError("EZ listing failed: %s" % msg)
res = [out[i].to_dict() for i in range(x.value)]
if res:
_lib.hdfsFreeEncryptionZoneInfo(out, x)
return res
def create_encryption_zone(self, path, key_name):
out = _lib.hdfsCreateEncryptionZone(self._handle, ensure_bytes(path),
ensure_bytes(key_name))
if out != 0:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError("EZ create failed: %s %s" % (path, msg))
def get_lib():
""" Import C-lib only on demand """
global _lib
if _lib is None:
from .lib import _lib as l
_lib = l
mode_numbers = {'w': 1, 'r': 0, 'a': 1025,
'wb': 1, 'rb': 0, 'ab': 1025}
class HDFile(object):
""" File on HDFS
Matches the standard Python file interface.
Examples
--------
>>> with hdfs.open('/path/to/hdfs/file.txt') as f: # doctest: +SKIP
... bytes = f.read(1000) # doctest: +SKIP
>>> with hdfs.open('/path/to/hdfs/file.csv') as f: # doctest: +SKIP
... df = pd.read_csv(f, nrows=1000) # doctest: +SKIP
"""
def __init__(self, fs, path, mode, replication=0, buff=0, block_size=0):
""" Called by open on a HDFileSystem """
if 't' in mode:
raise NotImplementedError("Opening a file in text mode is not"
" supported, use ``io.TextIOWrapper``.")
self.fs = fs
self.path = path
self.replication = replication
self.buff = buff
self._fs = fs._handle
self.buffers = []
self._handle = None
self.mode = mode
self.block_size = block_size
self.lines = deque([])
self._set_handle()
self.size = self.info()['size']
def _set_handle(self):
out = _lib.hdfsOpenFile(self._fs, ensure_bytes(self.path),
mode_numbers[self.mode], self.buff,
ctypes.c_short(self.replication),
ctypes.c_int64(self.block_size))
if not out:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError("Could not open file: %s, mode: %s %s" %
(self.path, self.mode, msg))
self._handle = out
def readinto(self, length, out):
"""
Read up to ``length`` bytes from the file into the ``out`` buffer,
which can be of any type that implements the buffer protocol (example: ``bytearray``,
``memoryview`` (py3 only), numpy array, ...).
Parameters
----------
length : int
maximum number of bytes to read
out : buffer
where to write the output data
Returns
-------
int
number of bytes read
"""
if not _lib.hdfsFileIsOpenForRead(self._handle):
raise IOError('File not in read mode')
bufsize = length
bufpos = 0
# convert from buffer protocol to ctypes-compatible type
buflen = _nbytes(out)
buf_for_ctypes = (ctypes.c_byte * buflen).from_buffer(out)
while length:
bufp = ctypes.byref(buf_for_ctypes, bufpos)
ret = _lib.hdfsRead(
self._fs, self._handle, bufp, ctypes.c_int32(bufsize - bufpos))
if ret == 0: # EOF
break
if ret > 0:
length -= ret
bufpos += ret
else:
raise IOError('Read file %s Failed:' % self.path, -ret)
return bufpos
def read(self, length=None, out_buffer=None):
"""
Read up to ``length`` bytes from the file. Reads shorter than ``length``
only occur at the end of the file, if less data is available.
If ``out_buffer`` is given, read directly into ``out_buffer``. It can be
anything that implements the buffer protocol, for example ``bytearray``,
``memoryview`` (py3 only), numpy arrays, ...
Parameters
----------
length : int
number of bytes to read. if it is None, read all remaining bytes
from the current position.
out_buffer : buffer, None or True
the buffer to use as output, None to return bytes, True to create
and return new buffer
Returns
-------
bytes
the data read (only if out_buffer is None)
memoryview
the data read as a memoryview into the buffer
"""
return_buffer = out_buffer is not None
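        # Clamp the requested length so the read never extends past end-of-file.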
max_read = self.size - self.tell()
read_length = max_read if length in [None, -1] else length
read_length = min(max_read, read_length)
if out_buffer is None or out_buffer is True:
out_buffer = bytearray(read_length)
else:
if _nbytes(out_buffer) < read_length:
raise IOError('buffer too small (%d < %d)' % (_nbytes(out_buffer), read_length))
bytes_read = self.readinto(length=read_length, out=out_buffer)
if bytes_read < _nbytes(out_buffer):
out_buffer = memoryview(out_buffer)[:bytes_read]
if return_buffer:
return memoryview(out_buffer)
return memoryview(out_buffer).tobytes()
def readline(self, chunksize=0, lineterminator='\n'):
""" Return a line using buffered reading.
A line is a sequence of bytes between ``'\n'`` markers (or given
line-terminator).
Line iteration uses this method internally.
Note: this function requires many calls to HDFS and is slow; it is
in general better to wrap an HDFile with an ``io.TextIOWrapper`` for
buffering, text decoding and newline support.
"""
if chunksize == 0:
chunksize = self.buff if self.buff != 0 else DEFAULT_READ_BUFFER_SIZE
lineterminator = ensure_bytes(lineterminator)
start = self.tell()
seek_delimiter(self, lineterminator, chunksize, allow_zero=False)
end = self.tell()
self.seek(start)
return self.read(end - start)
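# As the docstring above suggests, line-oriented text access is usually faster
# through ``io.TextIOWrapper`` (sketch; ``f`` is an HDFile opened in 'rb' mode
# and ``handle`` is a hypothetical callback):
#
#   import io
#   for line in io.TextIOWrapper(f, encoding='utf-8'):
#       handle(line)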
def _genline(self):
while True:
out = self.readline()
if out:
yield out
else:
return  # PEP 479: end the generator by returning instead of raising StopIteration
def __iter__(self):
""" Enables `for line in file:` usage """
return self._genline()
def __next__(self):
""" Enables reading a file as a buffer in pandas """
out = self.readline()
if out:
return out
else:
raise StopIteration
# PY2 compatibility
next = __next__
def readlines(self):
""" Return all lines in a file as a list """
return list(self)
def tell(self):
""" Get current byte location in a file """
out = _lib.hdfsTell(self._fs, self._handle)
if out == -1:
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError('Tell Failed on file %s %s' % (self.path, msg))
return out
def seek(self, offset, from_what=0):
""" Set file read position. Read mode only.
Attempting to move outside the file bounds raises an exception. Note that,
by the usual Python file-seek convention, offset should be <= 0 when
from_what is 2.
Parameters
----------
offset : int
byte location in the file.
from_what : int 0, 1, 2
if 0 (default), relative to file start; if 1, relative to current
location; if 2, relative to file end.
Returns
-------
new position
"""
if from_what not in {0, 1, 2}:
raise ValueError('seek mode must be 0, 1 or 2')
info = self.info()
if from_what == 1:
offset = offset + self.tell()
elif from_what == 2:
offset = info['size'] + offset
if offset < 0 or offset > info['size']:
raise ValueError('Attempt to seek outside file')
out = _lib.hdfsSeek(self._fs, self._handle, ctypes.c_int64(offset))
if out == -1: # pragma: no cover
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError('Seek Failed on file %s %s' % (self.path, msg))
return self.tell()
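# Sketch of the whence semantics (standard Python file convention):
#
#   f.seek(16)       # absolute: byte 16 from the start of the file
#   f.seek(-4, 1)    # 4 bytes back from the current position
#   f.seek(0, 2)     # jump to the end of the file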
def info(self):
""" Filesystem metadata about this file """
return self.fs.info(self.path)
def write(self, data):
""" Write bytes to open file (which must be in w or a mode) """
data = ensure_bytes(data)
if not data:
return
if not _lib.hdfsFileIsOpenForWrite(self._handle):
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError('File not in write mode: {}'.format(msg))
write_block = self.buff if self.buff != 0 else DEFAULT_WRITE_BUFFER_SIZE
for offset in range(0, len(data), write_block):
d = ensure_bytes(data[offset:offset + write_block])
if not _lib.hdfsWrite(self._fs, self._handle, d, len(d)) == len(d):
msg = ensure_string(_lib.hdfsGetLastError()).split('\n')[0]
raise IOError('Write failed on file %s, %s' % (self.path, msg))
return len(data)
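# Sketch of write usage (``hdfs`` is a hypothetical HDFileSystem instance);
# as implemented above, the data is pushed in ``write_block``-sized chunks.
#
#   with hdfs.open('/tmp/out.log', 'wb') as f:
#       f.write(b'hello world\n')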
def flush(self):
""" Send buffer to the data-node; actual write may happen later """
_lib.hdfsFlush(self._fs, self._handle)
def close(self):
""" Flush and close file, ensuring the data is readable """
self.flush()
_lib.hdfsCloseFile(self._fs, self._handle)
self._handle = None # _libhdfs releases memory
self.mode = 'closed'
@property
def read1(self):
return self.read
@property
def closed(self):
return self.mode == 'closed'
def writable(self):
return self.mode.startswith('w') or self.mode.startswith('a')
def seekable(self):
return self.readable()
def readable(self):
return self.mode.startswith('r')
def __del__(self):
self.close()
def __repr__(self):
return 'hdfs://%s:%s%s, %s' % (self.fs.host, self.fs.port,
self.path, self.mode)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
| bsd-3-clause |
ramansbach/cluster_analysis | clustering/scripts/analyze_cut_mu2_het.py | 1 | 6212 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 13 07:55:15 2017
@author: Rachael Mansbach
Script to do the following (after running analyze_clusters of some kind to get
the appropriate cluster sizes)
* Plot Mass-averaged cluster size of contact and optical clusters
both separately and in the same plot, including standard deviation over runs
and save raw mu2 data
* Compute linear and nonlinear Smoluchowski fits & plot for contact & optical clusters
Most parameters are the same as in analyze_clusters_serial.py
"""
from __future__ import absolute_import, division, print_function
from mpi4py import MPI
from time import time
import clustering as cl
import gsd.hoomd
import os.path as op
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pdb
#data_path = '/home/rachael/Analysis_and_run_code/analysis/cluster_analysis/clustering/data'
save_path=SSS
data_path=save_path
# Matplotlib setup
plt.ioff()
font = {'weight' : 'bold',
'size' : 22}
matplotlib.rc('font', **font)
runs = 5
ttotal = 1000
ttotals = {'contact':ttotal,'optical':ttotal}
tstart = 10
ats = {'contact':17,'optical':12}
#molno = 4
molno = 10240
c1 = max(float(BBBA),float(BBBB))
c1 = max(1.1,(c1/100.)*1.1225+0.1)
cs={'contact':c1,'optical':0.35}
compairs = np.array([[0,6],[1,7],[2,8],[3,9],[4,10],[5,11]])
#compairs = np.array([[0,1],[2,3],[4,5],[6,7],[8,9],[10,11]])
molnolabel = 10000
AAdlabel = AAA
SCdlabel = SCSCSC
BBdlabel = BBBB
dt = 1.0
atype = u'LS'
#atype = 'AB'
combeadtype = 'E'
colors = {'contact':'red','optical':'blue'}
fbase = 'mols'+str(molnolabel)+'_' + str(AAdlabel)+'-02-'\
+str(SCdlabel)+'-150-'+str(BBdlabel)+'_small_run'
#fbase = 'dummyfull4_run'
fnames = []
for i in range(runs):
fname = op.join(data_path,fbase + str(i+1) + '.gsd')
fnames.append(fname)
run = 0
mu2s = {'contact':np.zeros([ttotal,runs]),
'optical':np.zeros([ttotal,runs]),}
start = time()
for ctype in ['contact','optical']:
#pdb.set_trace()
cszNames = [op.join(save_path,fbase + 'cut'+str(cs[ctype])+ str(runi+1) + ctype + '-sizes.dat') \
for runi in range(runs)]
for run in range(runs):
# pdb.set_trace()
ccsizes = cl.getSizesFromFile([cszNames[run]],ttotal)
mu2 = np.zeros(np.shape(ccsizes)[0])
for m in range(np.shape(ccsizes)[0]):
mu2[m] = cl.massAvSize(ccsizes[m,:])
mu2s[ctype][:,run] = mu2
end = time()
print("Time to get mu2 from file: ",end-start)
figall = plt.figure()
axall = figall.add_subplot(111)
figeach = plt.figure()
axseach = {}
cid = 0
start = time()
finsize = dict()
sfinsize = dict()
for ctype in ['contact','optical']:
axseach[ctype] = figeach.add_subplot(3,1,cid+1)
mu2sc = mu2s[ctype]
ymax = np.max(mu2sc[10:len(mu2sc),:])
mu2smean = np.mean(mu2s[ctype],axis=1)
mu2sstd = np.std(mu2s[ctype],axis=1)
finsize[ctype] = mu2smean[len(mu2smean)-1]
sfinsize[ctype] = mu2sstd[len(mu2sstd)-1]
runl, = axall.plot(dt*np.arange(0,ttotal),mu2smean,linewidth=2,
color=colors[ctype])
mu2final = mu2smean[len(mu2smean)-1]
if ctype == 'contact':
axall.set_ylim([0,ymax])
tend = max(np.argmin(abs(mu2smean[tstart:len(mu2smean)]-0.97*10648))+tstart,tstart+10)
#pdb.set_trace()
axseach[ctype].set_ylim([0,ymax])
runl.set_label(ctype)
axseach[ctype].plot(dt*np.arange(ttotal),mu2smean,linewidth=2,
color=colors[ctype])
axall.fill_between(np.arange(ttotal),mu2smean-mu2sstd,mu2smean+mu2sstd,
facecolor=colors[ctype],alpha=0.3)
axseach[ctype].fill_between(np.arange(ttotal),mu2smean-mu2sstd,
mu2smean+mu2sstd,facecolor=colors[ctype],
alpha=0.3)
axseach[ctype].set_xlabel(r'$t/\tau^*$')
axseach[ctype].set_ylabel(r'$ \langle \mu_2 \rangle$')
axseach[ctype].grid('on')
fmu2s = open(op.join(save_path,fbase + '-' + ctype + '-mu2s.dat'),'w')
#pdb.set_trace()
for i in range(np.shape(mu2s[ctype])[0]):
for j in range(np.shape(mu2s[ctype])[1]):
fmu2s.write('{0} '.format(mu2s[ctype][i][j]))
fmu2s.write('\n')
fmu2s.close()
cid += 1
axall.set_xlabel(r'$t$/$\tau^*$')
axall.set_ylabel(r'$ \langle \mu_2 \rangle$')
handles, labels = axall.get_legend_handles_labels()
lgd = axall.legend(handles, labels, loc='upper center',
bbox_to_anchor=(0.5,-0.2))
axall.grid('on')
figall.savefig(op.join(save_path,fbase+'-mu2s-all'),
bbox_extra_artists=(lgd,),
bbox_inches='tight')
figeach.savefig(op.join(save_path,fbase+'-mu2s-each'),
bbox_extra_artists=(lgd,),
bbox_inches='tight')
plt.close('all')
end = time()
print("Time to plot mu2s: ",end-start)
#Smoluchowski fitting
start = time()
for ctype in ['contact','optical']:
cszNames = [op.join(save_path,fbase + 'cut'+str(cs[ctype])+str(runi+1) + ctype + '-sizes.dat') \
for runi in range(runs)]
(tc,sigtc) = cl.linearWithErrors(cszNames,ttotals[ctype],tstart=tstart,dt=dt,
plotstats=[op.join(save_path,
fbase+'smol-linear-'+ctype),
r'$t/\tau^*$',
'$\mu_2$',['o','x','^','v','s']],
tend=tend)
(nltc,nlsigtc,sse,lmbda,siglmbda) = \
cl.nonlinearWithErrorsFromFile(cszNames,ttotals[ctype],dt=dt,tstart=tstart,
plotstats=[op.join(save_path,
fbase+'smol-NL-'+ctype)
,'t (0.05 ns)','$\mu_2$',
['o','x','^','v','s']],tend=tend)
ftcs = open(op.join(save_path,fbase+'smol-data-'+ctype+'.dat'),'w')
ftcs.write('#tc sigtc nltc nlsigtc lmbda siglmbda sse finsize sigfinsize\n')
ftcs.write('{0} {1} {2} {3} {4} {5} {6} {7} {8}\n'.format(tc,sigtc,nltc[0],nlsigtc,
lmbda,siglmbda,sse,finsize[ctype],
sfinsize[ctype]))
ftcs.close()
end = time()
print("Time to fit Smol: ",end-start)
| mit |
ViennaRNA/forgi | examples/plot_projection.py | 1 | 8834 | #!/usr/bin/python
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import map
from builtins import range
from builtins import (ascii, bytes, chr, dict, filter, hex, input,
int, map, next, oct, open, pow, range, round,
str, super, zip)
from future.builtins.disabled import (apply, cmp, coerce, execfile,
file, long, raw_input, reduce, reload,
unicode, xrange, StandardError)
import sys
import random
import math
import os
import itertools as it
import collections as col
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import forgi.threedee.model.coarse_grain as ftmc
import forgi.projection.projection2d as ftmp
import forgi.threedee.utilities.vector as ftuv
import argparse
#################### MAIN FUNCTION #####################################
def main(args):
files = args.cgfiles
# Uncomment the following line to display the files in a random order.
# random.shuffle(files)
# Prepare the pyplot figure
totalFigures = len(files)
figuresPerLine = int(math.ceil(math.sqrt(totalFigures)))
fig, ax = plt.subplots(int(math.ceil(
totalFigures / figuresPerLine)), figuresPerLine, squeeze=False, figsize=(8, 8))
# Background color of figure (not plot)
if args.style == "WOB":
fig.patch.set_facecolor('black')
# Plot one projection per file.
for i, file_ in enumerate(files):
# get the subplot axes (Note: axes != axis in matplotlib)
current_axes = ax[i // figuresPerLine, i % figuresPerLine]
# Parse the file
cg = ftmc.CoarseGrainRNA(file_)
# Random projection direction, if no direction present in the file
if args.proj_direction:
direction = list(map(float, args.proj_direction.split(",")))
elif cg.project_from is not None:
direction = cg.project_from
else:
direction = ftuv.get_random_vector()
# Generate the projection object
proj = ftmp.Projection2D(
cg, direction, rotation=180, project_virtual_atoms=args.virtual_atoms)
# Simulate a reduced resolution of the image.
if args.condense:
proj.condense(args.condense)
target_elems = []
if args.show_distances:
try:
num_elems = int(args.show_distances)
except ValueError:
target_elems = args.show_distances.split(",")
else:
if num_elems > len(proj._coords.keys()):
raise ValueError("--show-distances must not be greater {} for the current projection ({}:'{}')".format(
len(proj._coords.keys()), i, file_))
elems = list(proj._coords.keys())
random.shuffle(elems)
while len(target_elems) < num_elems:
r = random.random()
if r < 0.4:
hairpins = [x for x in elems if x[0]
== "h" and x not in target_elems]
if hairpins:
target_elems.append(hairpins[0])
continue
if r < 0.6:
multiloops = [x for x in elems if x[0]
== "m" and x not in target_elems]
if multiloops:
target_elems.append(multiloops[0])
continue
others = [x for x in elems if x not in target_elems]
target_elems.append(others[0])
comb = list(it.combinations(target_elems, 2))
#print(comb, target_elems)
if args.label_elements:
target_elems = list(proj._coords.keys())
line2dproperties = {}
if args.style == "BOW":
line2dproperties["color"] = "black"
elif args.style == "WOB":
line2dproperties["color"] = "white"
#Plot the projection #
proj.plot(current_axes, margin=15, linewidth=3, add_labels=set(target_elems), line2dproperties=line2dproperties,
show_distances=comb, print_distances=args.print_distances)
# Uncomment to set a substring of the filename as a title
# current_axes.set_title(file[-15:])
# Hide the x- and y axis.
current_axes.get_xaxis().set_visible(False)
current_axes.get_yaxis().set_visible(False)
# Print the projection direction and the filename in the plot.
if args.show_direction or args.p:
current_axes.text(0.01, 0.01, "Projection direction: ({},{},{})".format(round(
direction[0], 3), round(direction[1], 3), round(direction[2], 3)), transform=current_axes.transAxes)
if args.show_filename or args.p:
current_axes.text(0.01, 0.99, "File: {}".format(
file_), transform=current_axes.transAxes, verticalalignment='top',)
# Change the backgroundcolor of the plot area.
if args.style == "WOB":
current_axes.set_axis_bgcolor('black')
# Hide additional subplots with no projection on them.
for i in range(len(files), int(math.ceil(totalFigures / figuresPerLine)) * figuresPerLine):
ax[i // figuresPerLine, i % figuresPerLine].axis('off')
# Reduce the space outside of the plots and between the subplots.
plt.subplots_adjust(left=0.025, right=0.975, bottom=0.025,
top=0.975, wspace=0.05, hspace=0.05)
if args.out:
for ofname in args.out:
if args.out_path:
ofname = os.path.join(args.out_path, ofname)
ofname = os.path.expanduser(ofname)
plt.savefig(ofname, format=ofname[-3:])
if not args.out or args.show:
# Show the plot and clear it from the internal memory of matplotlib.
plt.show()
########################### END OF MAIN FUNCTION #########################################
def get_parser():
"""
Here all commandline & help-messages arguments are defined.
:returns: an instance of argparse.ArgumentParser
"""
parser = argparse.ArgumentParser()
# Argument(s)
parser.add_argument(
'cgfiles', nargs='+', help='One or more *.cg/*.coord files holding the RNA to plot.')
parser.add_argument('--show-direction', action="store_true",
help='Print the projection direction in the plot')
parser.add_argument('--proj-direction', type=str,
help='Use the given projection direction instead of the one from the file. A comma-separated triple of floats (with no whitespace)')
parser.add_argument('--show-filename', action="store_true", default=False,
help='Print the filename of the input file in the figure')
parser.add_argument('-p', action="store_true",
help='Shortcut for --show-direction and --show-filename. Note that texts are not visible in all styles.')
parser.add_argument('--label-elements', default=False, action="store_true",
help='Label all coarse-grained elements in the plot.')
parser.add_argument('--print-distances', default=False, action="store_true",
help='Print distances for all elements given in --show-distances at the side in the plot')
parser.add_argument('--out', '-o', type=str, nargs='+', help='One or more outfiles to save the resulting figure to. '
'The file format will be determined by the file ending. Formats could be e.g. "svg", "png" or "pgf"')
parser.add_argument('--out-path', type=str, nargs='?',
help='Optional path, used for all files given with the "--out" option')
parser.add_argument('--style', type=str, default="DEF", choices=["DEF", "WOB", "BOW", "COL"],
help='Plot style. "DEF" (default: color on white), "WOB" (white on black), "BOW" (Black on white), "COL" same as "DEF" ')
parser.add_argument('--condense', type=float,
help='Simulate resolution reduction. This is an experimental feature and does not work very well.')
parser.add_argument('--show-distances', type=str,
help='Either an Integer, or a ","-separated list of coarse grained elements.')
parser.add_argument('--virtual-atoms', action="store_true",
help='Show virtual atoms (Slow).')
parser.add_argument('--show', action="store_true",
help='Show the plot. If --out is not given, this is implicitly set to true.')
return parser
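# Example invocation (a sketch; the input and output file names are hypothetical):
#
#   python plot_projection.py structure1.cg structure2.cg \
#       --show-distances 3 --style WOB --virtual-atoms \
#       --out projection.svg --out-path ~/plots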
parser = get_parser()
if __name__ == "__main__":
main(parser.parse_args())
| gpl-3.0 |
eladnoor/ms-tools | fia/annotate.py | 1 | 5648 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 12 23:54:03 2015
@author: eladn
"""
import numpy as np
import sys
import os
import argparse
import pandas as pd
from tqdm import tqdm
base_path = os.path.split(os.path.realpath(__file__))[0]
sys.path.append(base_path)
from openbis import download_data_profiles, get_sample_names
# script parameters (as determined by Mattia)
MIN_PEAK_SIZE = 5000
MAX_MZ_DIFFERENCE = 0.003
REF_MASS_RANGE = (50, 1000)
REFERENCE_MASS_FNAME = os.path.join(base_path, 'EMDTB.csv')
if not os.path.exists(REFERENCE_MASS_FNAME):
raise Exception('Cannot locate the CSV file containing reference masses: '
+ REFERENCE_MASS_FNAME)
def findpeaks(a):
"""
Returns:
list of indices of the strict local maxima in a 1D array 'a'. A local peak
must be strictly larger than both of its neighboring samples. Endpoints are
excluded, and flat peaks (plateaus of equal values) are not detected by this criterion.
"""
tmp = np.diff(np.sign(np.diff(a.flat)))
return np.where(tmp == -2)[0] + 1
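# Quick illustration of findpeaks on a toy array (strict local maxima at
# indices 1 and 3):
#
#   >>> findpeaks(np.array([0., 2., 1., 3., 1.]))
#   array([1, 3])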
#%%
parser = argparse.ArgumentParser(description='Download FIA raw data from openBIS')
parser.add_argument('exp_code', type=str,
help='the openBIS experiment ID')
parser.add_argument('-o', dest='output_path', type=str, default=None,
help='the path where all output files will be written')
args = parser.parse_args()
#%%
sample_df = pd.Series(get_sample_names(args.exp_code))
sample_df.index.name = 'sample.code'
sample_df.name = 'sample.name'
sample_df = sample_df.to_frame().sort_index()
dataProfiles = download_data_profiles(args.exp_code)
dsSampleCodes = sorted(dataProfiles.keys())
n_samples = len(dsSampleCodes)
# allPeaks is a list of matrices (one per sample) of the ion mz and intensity
# only for the peaks (i.e. local maxima)
allPeaks = {}
#%% identify peaks (local maxima)
for s in tqdm(dsSampleCodes, desc='Identifying centroids'):
# find all the values that are local maxima and pass the threshold
idxs = findpeaks(dataProfiles[s][:, 1])
idxs = list(filter(lambda j : dataProfiles[s][j, 1] >= MIN_PEAK_SIZE, idxs))
allPeaks[s] = dataProfiles[s][idxs, :]
#%% Use the reference table to associate peaks to compounds, by minimum mass distance
reference_df = pd.read_csv(REFERENCE_MASS_FNAME)
reference_df.index.name = 'ion.code'
# subtract the mass of H+ (i.e. look for deprotonated masses)
proton_mass = reference_df.loc[0, 'mass']
# keep only ions in the relevant range for FIA
compound_df = reference_df[(REF_MASS_RANGE[0] < reference_df['mass']) &
(REF_MASS_RANGE[1] > reference_df['mass'])]
# peak_masses[i, j] will contain the exact mass of the peak which is closest
# to reference 'j' in sample 'i'. If there is no peak which is close enough
# (i.e. in the range of MAX_MZ_DIFFERENCE), the value will be NaN
# peak_indices[i, j] will contain the index of that peak in 'allPeaks[i]'
peak_masses = pd.DataFrame(index=reference_df.index, columns=dsSampleCodes,
dtype=np.single)
peak_indices = pd.DataFrame(index=reference_df.index, columns=dsSampleCodes,
dtype=int)
for s in tqdm(dsSampleCodes, desc='Identifying metabolites'):
for j, refmass in reference_df['mass'].items():
diffs = abs(allPeaks[s][:, 0] + proton_mass - refmass)
peak_idx = np.argmin(diffs)
if diffs[peak_idx] <= MAX_MZ_DIFFERENCE:
peak_indices.loc[j, s] = peak_idx
peak_masses.loc[j, s] = allPeaks[s][peak_idx, 0]
else:
peak_indices.loc[j, s] = -1
peak_masses.loc[j, s] = np.nan
# keep only the reference masses that actually have a 'hit' in at least one
# of the samples, and calculate the median of all samples where a peak was
# associated with this mass
ref_hits = (peak_indices != -1).any(1)
peak_indices = peak_indices.loc[ref_hits, :]
median_masses = peak_masses.loc[ref_hits, :].median(1)
compound_df = compound_df.loc[ref_hits, :]
#%%
# data_df[i, j] will contain the intensity of the peak which was associated with
# reference mass 'j' in sample 'i'. If there wasn't any close enough mass,
# we take the median mass of the ions associated with this reference mass
# across all other samples, and find the ion closest to the median (even if
# it is not actually a peak).
data_df = pd.DataFrame(index=compound_df.index, columns=dsSampleCodes,
dtype=np.single)
data_df.index.name = 'ion.code'
for s in tqdm(dsSampleCodes, desc='Creating final matrix'):
for j, median_mass in median_masses.items():
peak_idx = peak_indices.loc[j, s]
if peak_idx != -1:
# if there is a peak associated with the metabolite,
# get the intensity of that peak
data_df.loc[j, s] = allPeaks[s][peak_idx, 1]
else:
# otherwise, get the intensity from the closest mz in the raw data
idx = np.argmin(np.abs(median_mass - dataProfiles[s][:, 0]))
data_df.loc[j, s] = dataProfiles[s][idx, 1]
#merged = compound_df.join(merged)
#%%
if args.output_path is None:
args.output_path = os.path.join(os.path.abspath(os.path.curdir),
args.exp_code)
ion_fname = args.output_path + '_ions.csv'
sample_fname = args.output_path + '_samples.csv'
data_fname = args.output_path + '_data.csv'
sys.stderr.write('\nWriting results to output CSV files to path "%s" ... '
% args.output_path)
compound_df.to_csv(ion_fname)
data_df.to_csv(data_fname)
sample_df.to_csv(sample_fname)
sys.stderr.write('[DONE]\n')
| mit |
DavidMFreeman34/NoahDavidCollab | bubuko-script.py | 1 | 1660 | #-----------------------------------------
# Python + Matplotlib Penrose
# Taken from http://www.bubuko.com/infodetail-911894.html
#-----------------------------------------
import os
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.path import Path
from matplotlib.patches import PathPatch
plt.figure(figsize=(8,6),dpi=100)
plt.subplot(aspect=1)
plt.axis([-0.6,0.6,-0.6,0.6])
plt.xticks([])
plt.yticks([])
plt.axis('off')
a = 0.5*(np.sqrt(5)-1)
def subdivide(triangles):
result = []
for color,A,B,C in triangles:
if color == 0:
P = A + (B-A)*a
result += [(0,C,P,B),(1,P,C,A)]
else:
Q = B+(A-B)*a
R = B+(C-B)*a
result +=[(1,R,C,A),(1,Q,R,B),(0,R,Q,A)]
return result
def DrawFigure(triangles):
for color,A,B,C in triangles:
vertices = [C,A,B]
codes = [Path.MOVETO]+[Path.LINETO]*2
tri = Path(vertices,codes)
if color == 0:
tri_patch=PathPatch(tri,facecolor='#FF0099',edgecolor='#666666',linewidth=0.8)
else:
tri_patch=PathPatch(tri,facecolor='#66CCFF',edgecolor='#666666',linewidth=0.8)
plt.gca().add_patch(tri_patch)
# plt.show()
plt.savefig("exports/" + os.path.basename(__file__) + ".png", format='png')
triangles = []
A=np.array([0,0])
for i in range(10):
B = np.array([np.cos(0.2*np.pi*i),np.sin(0.2*np.pi*i)])
C = np.array([np.cos(0.2*np.pi*(i+1)),np.sin(0.2*np.pi*(i+1))])
if i%2 == 0:
B , C = C, B
triangles.append([0,A,B,C])
m = int(input('Enter number of divisions: '))
for j in range(m):
triangles=subdivide(triangles)
DrawFigure(triangles) | mit |
google-research/google-research | smurf/smurf_plotting.py | 1 | 5999 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SMURF plotting.
This library provides some plotting functionality for optical flow.
"""
# pylint:skip-file
import io
import os
import time
import matplotlib
matplotlib.use('Agg') # Non-interactive plots do not need tk
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from smurf import smurf_utils
# How much to scale motion magnitude in visualization.
_FLOW_SCALING_FACTOR = 50.0
def print_log(log, epoch=None, mean_over_num_steps=1):
"""Print log returned by smurf.train(...)."""
if epoch is None:
status = ''
else:
status = '{} -- '.format(epoch)
status += 'total-loss: {:.6f}'.format(
np.mean(log['total-loss'][-mean_over_num_steps:]))
for key in sorted(log):
if key not in ['total-loss']:
loss_mean = np.mean(log[key][-mean_over_num_steps:])
status += ', {}: {:.6f}'.format(key, loss_mean)
print(status)
def print_eval(eval_dict):
"""Print eval_dict returned by the eval_function in smurf_main.py."""
status = ''.join(
['{}: {:.6f}, '.format(key, eval_dict[key]) for key in sorted(eval_dict)])
print(status[:-2])
def time_data_it(data_it, simulated_train_time_ms=100.0):
print('Timing training iterator with simulated train time of {:.2f}ms'.format(
simulated_train_time_ms))
for i in range(100):
start = time.time()
_ = data_it.get_next()
end = time.time()
print(i, 'Time to get one batch (ms):', (end - start) * 1000)
if simulated_train_time_ms > 0.0:
plt.pause(simulated_train_time_ms / 1000.)
def save_image_as_png(image, filename):
image_uint8 = tf.image.convert_image_dtype(image, tf.uint8, saturate=True)
image_png = tf.image.encode_png(image_uint8)
tf.io.write_file(filename, image_png)
def plot_data(data_it, plot_dir, num_plots):
print('Saving images from the dataset to', plot_dir)
for i, image_batch in enumerate(data_it):
if i >= num_plots:
break
for j, images in enumerate(image_batch['images']):
for k, image in enumerate(images):
save_image_as_png(
image, os.path.join(plot_dir, '{}_{}_{}.png'.format(i, j, k)))
def flow_to_rgb(flow):
"""Compute an RGB visualization of a flow field."""
shape = tf.cast(tf.shape(flow), tf.float32)
height, width = shape[-3], shape[-2]
scaling = _FLOW_SCALING_FACTOR / (height**2 + width**2)**0.5
# Compute angles and lengths of motion vectors.
motion_angle = tf.atan2(flow[Ellipsis, 1], flow[Ellipsis, 0])
motion_magnitude = (flow[Ellipsis, 1]**2 + flow[Ellipsis, 0]**2)**0.5
# Visualize flow using the HSV color space, where angles are represented by
# hue and magnitudes are represented by saturation.
flow_hsv = tf.stack([((motion_angle / np.pi) + 1.) / 2.,
tf.clip_by_value(motion_magnitude * scaling, 0.0, 1.0),
tf.ones_like(motion_magnitude)],
axis=-1)
# Transform colors from HSV to RGB color space for plotting.
return tf.image.hsv_to_rgb(flow_hsv)
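# Example (a sketch): visualize a random flow field and write it to disk.
# The tensor shape and output path below are arbitrary.
#
#   flow = tf.random.normal([1, 64, 64, 2])
#   rgb = flow_to_rgb(flow)
#   save_image_as_png(rgb[0], '/tmp/flow_vis.png')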
def complete_paper_plot(plot_dir,
index,
image1,
image2,
flow_uv,
ground_truth_flow_uv=None,
flow_valid_occ=None,
predicted_occlusion=None,
ground_truth_occlusion=None,
frame_skip=None):
def post_imshow(name, plot_dir):
plt.xticks([])
plt.yticks([])
if frame_skip is not None:
filename = str(index) + '_' + str(frame_skip) + '_' + name
plt.savefig(os.path.join(plot_dir, filename), bbox_inches='tight')
else:
filepath = str(index) + '_' + name
plt.savefig(os.path.join(plot_dir, filepath), bbox_inches='tight')
plt.clf()
warp = smurf_utils.flow_to_warp(tf.convert_to_tensor(flow_uv))
image1_reconstruction = smurf_utils.resample(tf.expand_dims(image2, axis=0),
tf.expand_dims(warp, axis=0))[0]
flow_uv = -flow_uv[:, :, ::-1]
if ground_truth_flow_uv is not None:
ground_truth_flow_uv = -ground_truth_flow_uv[:, :, ::-1]
plt.figure()
plt.clf()
plt.imshow(image1)
post_imshow('image1_rgb', plot_dir)
plt.imshow(image1_reconstruction)
post_imshow('image1_reconstruction_rgb', plot_dir)
plt.imshow(image1_reconstruction * predicted_occlusion)
post_imshow('image1_reconstruction_occlusions_rgb', plot_dir)
plt.imshow((image1 + image2) / 2.)
post_imshow('image_rgb', plot_dir)
plt.imshow(flow_to_rgb(flow_uv))
post_imshow('predicted_flow', plot_dir)
if ground_truth_flow_uv is not None and flow_valid_occ is not None:
plt.imshow(flow_to_rgb(ground_truth_flow_uv * flow_valid_occ))
post_imshow('ground_truth_flow', plot_dir)
endpoint_error = np.sum(
(ground_truth_flow_uv - flow_uv)**2, axis=-1, keepdims=True)**0.5
plt.imshow(
(endpoint_error * flow_valid_occ)[:, :, 0],
cmap='viridis',
vmin=0,
vmax=40)
post_imshow('flow_error', plot_dir)
if predicted_occlusion is not None:
plt.imshow((predicted_occlusion[:, :, 0]) * 255, cmap='Greys')
post_imshow('predicted_occlusion', plot_dir)
if ground_truth_occlusion is not None:
plt.imshow((ground_truth_occlusion[:, :, 0]) * 255, cmap='Greys')
post_imshow('ground_truth_occlusion', plot_dir)
plt.close('all')
| apache-2.0 |
robbymeals/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
liyu1990/sklearn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`,
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
jayhetee/dask | dask/bag/core.py | 1 | 37332 | from __future__ import absolute_import, division, print_function
import itertools
import math
import tempfile
import inspect
import gzip
import zlib
import bz2
import os
from fnmatch import fnmatchcase
from glob import glob
from collections import Iterable, Iterator, defaultdict
from functools import wraps, partial
from dask.utils import takes_multiple_arguments
from toolz import (merge, frequencies, merge_with, take, reduce,
join, reduceby, valmap, count, map, partition_all, filter,
remove, pluck, groupby, topk)
import toolz
from ..utils import tmpfile, ignoring, file_size, textblock
with ignoring(ImportError):
from cytoolz import (frequencies, merge_with, join, reduceby,
count, pluck, groupby, topk)
from ..multiprocessing import get as mpget
from ..core import istask, get_dependencies, reverse_dict
from ..optimize import fuse, cull, inline
from ..compatibility import (apply, BytesIO, unicode, urlopen, urlparse, quote,
unquote, StringIO)
from ..base import Base
names = ('bag-%d' % i for i in itertools.count(1))
tokens = ('-%d' % i for i in itertools.count(1))
load_names = ('load-%d' % i for i in itertools.count(1))
no_default = '__no__default__'
def lazify_task(task, start=True):
"""
Given a task, remove unnecessary calls to ``list``
Example
-------
>>> task = (sum, (list, (map, inc, [1, 2, 3]))) # doctest: +SKIP
>>> lazify_task(task) # doctest: +SKIP
(sum, (map, inc, [1, 2, 3]))
"""
if not istask(task):
return task
head, tail = task[0], task[1:]
if not start and head in (list, reify):
task = task[1]
return lazify_task(*tail, start=False)
else:
return (head,) + tuple([lazify_task(arg, False) for arg in tail])
def lazify(dsk):
"""
Remove unnecessary calls to ``list`` in tasks
See Also:
``dask.bag.core.lazify_task``
"""
return valmap(lazify_task, dsk)
def inline_singleton_lists(dsk):
""" Inline lists that are only used once
>>> d = {'b': (list, 'a'),
... 'c': (f, 'b', 1)} # doctest: +SKIP
>>> inline_singleton_lists(d) # doctest: +SKIP
{'c': (f, (list, 'a'), 1)}
Pairs nicely with lazify afterwards
"""
dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)
dependents = reverse_dict(dependencies)
keys = [k for k, v in dsk.items() if istask(v) and v
and v[0] is list
and len(dependents[k]) == 1]
return inline(dsk, keys, inline_constants=False)
def optimize(dsk, keys):
""" Optimize a dask from a dask.bag """
dsk2 = cull(dsk, keys)
dsk3 = fuse(dsk2)
dsk4 = inline_singleton_lists(dsk3)
dsk5 = lazify(dsk4)
return dsk5
def list2(seq):
""" Another list function that won't be removed by lazify """
return list(seq)
def to_textfiles(b, path, name_function=str):
""" Write bag to disk, one filename per partition, one line per element
**Paths**: This will create one file for each partition in your bag. You
can specify the filenames in a variety of ways.
Use a globstring
>>> b.to_textfiles('/path/to/data/*.json.gz') # doctest: +SKIP
The * will be replaced by the increasing sequence 0, 1, 2, ...
::
/path/to/data/0.json.gz
/path/to/data/1.json.gz
Use a globstring and a ``name_function=`` keyword argument. The
name_function function should expect an integer and produce a string.
>>> from datetime import date, timedelta
>>> def name(i):
... return str(date(2015, 1, 1) + i * timedelta(days=1))
>>> name(0)
'2015-01-01'
>>> name(15)
'2015-01-16'
>>> b.to_textfiles('/path/to/data/*.json.gz', name_function=name) # doctest: +SKIP
::
/path/to/data/2015-01-01.json.gz
/path/to/data/2015-01-02.json.gz
...
You can also provide an explicit list of paths.
>>> paths = ['/path/to/data/alice.json.gz', '/path/to/data/bob.json.gz', ...] # doctest: +SKIP
>>> b.to_textfiles(paths) # doctest: +SKIP
**Compression**: Filenames with extensions corresponding to known
compression algorithms (gz, bz2) will be compressed accordingly.
"""
if isinstance(path, (str, unicode)):
if '*' in path:
paths = [path.replace('*', name_function(i))
for i in range(b.npartitions)]
else:
paths = [os.path.join(path, '%s.part' % name_function(i))
for i in range(b.npartitions)]
elif isinstance(path, (tuple, list, set)):
assert len(path) == b.npartitions
paths = path
else:
raise ValueError("Path should be either\n"
"1. A list of paths -- ['foo.json', 'bar.json', ...]\n"
"2. A directory -- 'foo/\n"
"3. A path with a * in it -- 'foo.*.json'")
name = next(names)
dsk = dict(((name, i), (write, (b.name, i), path))
for i, path in enumerate(paths))
return Bag(merge(b.dask, dsk), name, b.npartitions)
def finalize(bag, results):
if isinstance(bag, Item):
return results[0]
if isinstance(results, Iterator):
results = list(results)
if isinstance(results[0], Iterable) and not isinstance(results[0], str):
results = toolz.concat(results)
if isinstance(results, Iterator):
results = list(results)
return results
class Item(Base):
_optimize = staticmethod(optimize)
_default_get = staticmethod(mpget)
_finalize = staticmethod(finalize)
def __init__(self, dsk, key):
self.dask = dsk
self.key = key
def _keys(self):
return [self.key]
def apply(self, func):
name = next(names)
dsk = {name: (func, self.key)}
return Item(merge(self.dask, dsk), name)
__int__ = __float__ = __complex__ = __bool__ = Base.compute
class Bag(Base):
""" Parallel collection of Python objects
Example
-------
Create Bag from sequence
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.filter(lambda x: x % 2 == 0).map(lambda x: x * 10)) # doctest: +SKIP
[0, 20, 40]
Create Bag from filename or globstring of filenames
>>> b = db.from_filenames('/path/to/mydata.*.json.gz').map(json.loads) # doctest: +SKIP
Create manually (expert use)
>>> dsk = {('x', 0): (range, 5),
... ('x', 1): (range, 5),
... ('x', 2): (range, 5)}
>>> b = Bag(dsk, 'x', npartitions=3)
>>> sorted(b.map(lambda x: x * 10)) # doctest: +SKIP
[0, 0, 0, 10, 10, 10, 20, 20, 20, 30, 30, 30, 40, 40, 40]
>>> int(b.fold(lambda x, y: x + y)) # doctest: +SKIP
30
"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(mpget)
_finalize = staticmethod(finalize)
def __init__(self, dsk, name, npartitions):
self.dask = dsk
self.name = name
self.npartitions = npartitions
self.str = StringAccessor(self)
def map(self, func):
""" Map a function across all elements in collection
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.map(lambda x: x * 10)) # doctest: +SKIP
[0, 10, 20, 30, 40]
"""
name = next(names)
if takes_multiple_arguments(func):
func = partial(apply, func)
dsk = dict(((name, i), (reify, (map, func, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
@property
def _args(self):
return (self.dask, self.name, self.npartitions)
def filter(self, predicate):
""" Filter elements in collection by a predicate function
>>> def iseven(x):
... return x % 2 == 0
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.filter(iseven)) # doctest: +SKIP
[0, 2, 4]
"""
name = next(names)
dsk = dict(((name, i), (reify, (filter, predicate, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def remove(self, predicate):
""" Remove elements in collection that match predicate
>>> def iseven(x):
... return x % 2 == 0
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.remove(iseven)) # doctest: +SKIP
[1, 3]
"""
name = next(names)
dsk = dict(((name, i), (reify, (remove, predicate, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def map_partitions(self, func):
""" Apply function to every partition within collection
Note that this requires you to understand how dask.bag partitions your
data and so is somewhat internal.
>>> b.map_partitions(myfunc) # doctest: +SKIP
"""
name = next(names)
dsk = dict(((name, i), (func, (self.name, i)))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
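# A slightly fuller sketch of map_partitions: the mapped function receives an
# entire partition (an iterable), not individual elements.
#
#   >>> b = from_sequence(range(6), npartitions=2)               # doctest: +SKIP
#   >>> b.map_partitions(lambda part: [sum(part)]).compute()     # doctest: +SKIP
#   [3, 12]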
def pluck(self, key, default=no_default):
""" Select item from all tuples/dicts in collection
>>> b = from_sequence([{'name': 'Alice', 'credits': [1, 2, 3]},
... {'name': 'Bob', 'credits': [10, 20]}])
>>> list(b.pluck('name')) # doctest: +SKIP
['Alice', 'Bob']
>>> list(b.pluck('credits').pluck(0)) # doctest: +SKIP
[1, 10]
"""
name = next(names)
if isinstance(key, list):
key = (list2, key)
if default == no_default:
dsk = dict(((name, i), (list, (pluck, key, (self.name, i))))
for i in range(self.npartitions))
else:
dsk = dict(((name, i), (list, (pluck, key, (self.name, i), default)))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
@classmethod
def from_sequence(cls, *args, **kwargs):
raise AttributeError("db.Bag.from_sequence is deprecated.\n"
"Use db.from_sequence instead.")
@classmethod
def from_filenames(cls, *args, **kwargs):
raise AttributeError("db.Bag.from_filenames is deprecated.\n"
"Use db.from_filenames instead.")
@wraps(to_textfiles)
def to_textfiles(self, path, name_function=str):
return to_textfiles(self, path, name_function)
def fold(self, binop, combine=None, initial=None):
""" Splittable reduction
Apply binary operator on each partition to perform reduce. Follow by a
second binary operator to combine results
>>> b = from_sequence(range(5))
>>> b.fold(lambda x, y: x + y).compute() # doctest: +SKIP
10
Optionally provide default arguments and special combine binary
operator
>>> b.fold(lambda x, y: x + y, lambda x, y: x + y, 0).compute() # doctest: +SKIP
10
"""
a = next(names)
b = next(names)
if initial:
dsk = dict(((a, i), (reduce, binop, (self.name, i), initial))
for i in range(self.npartitions))
else:
dsk = dict(((a, i), (reduce, binop, (self.name, i)))
for i in range(self.npartitions))
dsk2 = {b: (reduce, combine or binop, list(dsk.keys()))}
return Item(merge(self.dask, dsk, dsk2), b)
def frequencies(self):
""" Count number of occurrences of each distinct element
>>> b = from_sequence(['Alice', 'Bob', 'Alice'])
>>> dict(b.frequencies()) # doctest: +SKIP
{'Alice': 2, 'Bob': 1}
"""
a = next(names)
b = next(names)
dsk = dict(((a, i), (frequencies, (self.name, i)))
for i in range(self.npartitions))
dsk2 = {(b, 0): (dictitems,
(merge_with, sum, list(sorted(dsk.keys()))))}
return type(self)(merge(self.dask, dsk, dsk2), b, 1)
def topk(self, k, key=None):
""" K largest elements in collection
Optionally ordered by some key function
>>> b = from_sequence([10, 3, 5, 7, 11, 4])
>>> list(b.topk(2)) # doctest: +SKIP
[11, 10]
>>> list(b.topk(2, lambda x: -x)) # doctest: +SKIP
[3, 4]
"""
a = next(names)
b = next(names)
if key:
if callable(key) and takes_multiple_arguments(key):
key = partial(apply, key)
func = partial(topk, key=key)
else:
func = topk
dsk = dict(((a, i), (list, (func, k, (self.name, i))))
for i in range(self.npartitions))
dsk2 = {(b, 0): (list, (func, k, (toolz.concat, list(dsk.keys()))))}
return type(self)(merge(self.dask, dsk, dsk2), b, 1)
def distinct(self):
""" Distinct elements of collection
Unordered without repeats.
>>> b = from_sequence(['Alice', 'Bob', 'Alice'])
>>> sorted(b.distinct())
['Alice', 'Bob']
"""
a = next(names)
dsk = dict(((a, i), (set, key)) for i, key in enumerate(self._keys()))
b = next(names)
dsk2 = {(b, 0): (apply, set.union, (list2, list(dsk.keys())))}
return type(self)(merge(self.dask, dsk, dsk2), b, 1)
def reduction(self, perpartition, aggregate):
""" Reduce collection with reduction operators
Parameters
----------
perpartition: function
reduction to apply to each partition
aggregate: function
reduction to apply to the results of all partitions
Example
-------
>>> b = from_sequence(range(10))
>>> b.reduction(sum, sum).compute()
45
"""
a = next(names)
b = next(names)
dsk = dict(((a, i), (perpartition, (self.name, i)))
for i in range(self.npartitions))
dsk2 = {b: (aggregate, list(dsk.keys()))}
return Item(merge(self.dask, dsk, dsk2), b)
@wraps(sum)
def sum(self):
return self.reduction(sum, sum)
@wraps(max)
def max(self):
return self.reduction(max, max)
@wraps(min)
def min(self):
return self.reduction(min, min)
@wraps(any)
def any(self):
return self.reduction(any, any)
@wraps(all)
def all(self):
return self.reduction(all, all)
def count(self):
""" Count the number of elements """
return self.reduction(count, sum)
def mean(self):
""" Arithmetic mean """
def chunk(seq):
total, n = 0.0, 0
for x in seq:
total += x
n += 1
return total, n
def agg(x):
totals, counts = list(zip(*x))
return 1.0 * sum(totals) / sum(counts)
return self.reduction(chunk, agg)
def var(self, ddof=0):
""" Variance """
def chunk(seq):
squares, total, n = 0.0, 0.0, 0
for x in seq:
squares += x**2
total += x
n += 1
return squares, total, n
def agg(x):
squares, totals, counts = list(zip(*x))
x2, x, n = float(sum(squares)), float(sum(totals)), sum(counts)
result = (x2 / n) - (x / n)**2
return result * n / (n - ddof)
return self.reduction(chunk, agg)
def std(self, ddof=0):
""" Standard deviation """
return self.var(ddof=ddof).apply(math.sqrt)
def join(self, other, on_self, on_other=None):
""" Join collection with another collection
Other collection must be an Iterable, and not a Bag.
>>> people = from_sequence(['Alice', 'Bob', 'Charlie'])
>>> fruit = ['Apple', 'Apricot', 'Banana']
>>> list(people.join(fruit, lambda x: x[0])) # doctest: +SKIP
[('Apple', 'Alice'), ('Apricot', 'Alice'), ('Banana', 'Bob')]
"""
assert isinstance(other, Iterable)
assert not isinstance(other, Bag)
if on_other is None:
on_other = on_self
name = next(names)
dsk = dict(((name, i), (list, (join, on_other, other,
on_self, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def product(self, other):
""" Cartesian product between two bags """
assert isinstance(other, Bag)
name = next(names)
n, m = self.npartitions, other.npartitions
dsk = dict(((name, i*m + j),
(list, (itertools.product, (self.name, i),
(other.name, j))))
for i in range(n) for j in range(m))
return type(self)(merge(self.dask, other.dask, dsk), name, n*m)
def foldby(self, key, binop, initial=no_default, combine=None,
combine_initial=no_default):
""" Combined reduction and groupby
Foldby provides a combined groupby and reduce for efficient parallel
split-apply-combine tasks.
The computation
>>> b.foldby(key, binop, init) # doctest: +SKIP
is equivalent to the following:
>>> def reduction(group): # doctest: +SKIP
... return reduce(binop, group, init) # doctest: +SKIP
>>> b.groupby(key).map(lambda (k, v): (k, reduction(v)))# doctest: +SKIP
But uses minimal communication and so is *much* faster.
>>> b = from_sequence(range(10))
>>> iseven = lambda x: x % 2 == 0
>>> add = lambda x, y: x + y
>>> dict(b.foldby(iseven, add)) # doctest: +SKIP
{True: 20, False: 25}
Key Function
------------
The key function determines how to group the elements in your bag.
In the common case where your bag holds dictionaries then the key
function often gets out one of those elements.
>>> def key(x):
... return x['name']
This case is so common that it is special cased, and if you provide a
key that is not a callable function then dask.bag will turn it into one
automatically. The following are equivalent:
>>> b.foldby(lambda x: x['name'], ...) # doctest: +SKIP
>>> b.foldby('name', ...) # doctest: +SKIP
Binops
------
It can be tricky to construct the right binary operators to perform
analytic queries. The ``foldby`` method accepts two binary operators,
``binop`` and ``combine``.
Binop takes a running total and a new element and produces a new total
>>> def binop(total, x):
... return total + x['amount']
Combine takes two totals and combines them
>>> def combine(total1, total2):
... return total1 + total2
Each of these binary operators may have a default first value for
total, before any other value is seen. For addition binary operators
like above this is often ``0`` or the identity element for your
operation.
>>> b.foldby('name', binop, 0, combine, 0) # doctest: +SKIP
See also
--------
toolz.reduceby
pyspark.combineByKey
"""
a = next(names)
b = next(names)
if combine is None:
combine = binop
if initial is not no_default:
dsk = dict(((a, i),
(reduceby, key, binop, (self.name, i), initial))
for i in range(self.npartitions))
else:
dsk = dict(((a, i),
(reduceby, key, binop, (self.name, i)))
for i in range(self.npartitions))
combine2 = lambda acc, x: combine(acc, x[1])
if combine_initial is not no_default:
dsk2 = {(b, 0): (dictitems,
(reduceby,
0, combine2,
(toolz.concat, (map, dictitems, list(dsk.keys()))),
combine_initial))}
else:
dsk2 = {(b, 0): (dictitems,
(merge_with,
(partial, reduce, combine),
list(dsk.keys())))}
return type(self)(merge(self.dask, dsk, dsk2), b, 1)
def take(self, k, compute=True):
""" Take the first k elements
Evaluates by default, use ``compute=False`` to avoid computation.
Only takes from the first partition
>>> b = from_sequence(range(10))
>>> b.take(3) # doctest: +SKIP
(0, 1, 2)
"""
name = next(names)
dsk = {(name, 0): (list, (take, k, (self.name, 0)))}
b = Bag(merge(self.dask, dsk), name, 1)
if compute:
return tuple(b.compute())
else:
return b
def _keys(self):
return [(self.name, i) for i in range(self.npartitions)]
def concat(self):
""" Concatenate nested lists into one long list
>>> b = from_sequence([[1], [2, 3]])
>>> list(b)
[[1], [2, 3]]
>>> list(b.concat())
[1, 2, 3]
"""
name = next(names)
dsk = dict(((name, i), (list, (toolz.concat, (self.name, i))))
for i in range(self.npartitions))
return type(self)(merge(self.dask, dsk), name, self.npartitions)
def __iter__(self):
return iter(self.compute())
def groupby(self, grouper, npartitions=None, blocksize=2**20):
""" Group collection by key function
Note that this requires full dataset read, serialization and shuffle.
This is expensive. If possible you should use ``foldby``.
>>> b = from_sequence(range(10))
>>> dict(b.groupby(lambda x: x % 2 == 0)) # doctest: +SKIP
{True: [0, 2, 4, 6, 8], False: [1, 3, 5, 7, 9]}
See Also
--------
Bag.foldby
"""
if npartitions is None:
npartitions = self.npartitions
import partd
p = ('partd' + next(tokens),)
try:
dsk1 = {p: (partd.Python, (partd.Snappy, partd.File()))}
except AttributeError:
dsk1 = {p: (partd.Python, partd.File())}
# Partition data on disk
name = next(names)
dsk2 = dict(((name, i),
(partition, grouper, (self.name, i),
npartitions, p, blocksize))
for i in range(self.npartitions))
# Barrier
barrier_token = 'barrier' + next(tokens)
def barrier(args): return 0
dsk3 = {barrier_token: (barrier, list(dsk2))}
# Collect groups
name = next(names)
dsk4 = dict(((name, i),
(collect, grouper, i, p, barrier_token))
for i in range(npartitions))
return type(self)(merge(self.dask, dsk1, dsk2, dsk3, dsk4), name, npartitions)
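# Sketch: the foldby equivalent of the groupby example above, which avoids the
# full disk shuffle when only a per-group aggregate is needed.
#
#   >>> b = from_sequence(range(10))                                      # doctest: +SKIP
#   >>> dict(b.foldby(lambda x: x % 2 == 0, lambda acc, x: acc + x, 0))   # doctest: +SKIP
#   {True: 20, False: 25}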
def to_dataframe(self, columns=None):
""" Convert Bag to dask.dataframe
Bag should contain tuple or dict records.
Provide ``columns=`` keyword arg to specify column names.
Index will not be particularly meaningful. Use ``reindex`` afterwards
if necessary.
Example
-------
>>> import dask.bag as db
>>> b = db.from_sequence([{'name': 'Alice', 'balance': 100},
... {'name': 'Bob', 'balance': 200},
... {'name': 'Charlie', 'balance': 300}],
... npartitions=2)
>>> df = b.to_dataframe()
>>> df.compute()
balance name
0 100 Alice
1 200 Bob
0 300 Charlie
"""
import pandas as pd
import dask.dataframe as dd
if columns is None:
head = self.take(1)[0]
if isinstance(head, dict):
columns = sorted(head)
elif isinstance(head, (tuple, list)):
columns = list(range(len(head)))
name = next(names)
DataFrame = partial(pd.DataFrame, columns=columns)
dsk = dict(((name, i), (DataFrame, (list2, (self.name, i))))
for i in range(self.npartitions))
divisions = [None] * (self.npartitions + 1)
return dd.DataFrame(merge(optimize(self.dask, self._keys()), dsk),
name, columns, divisions)
def partition(grouper, sequence, npartitions, p, nelements=2**20):
""" Partition a bag along a grouper, store partitions on disk """
for block in partition_all(nelements, sequence):
d = groupby(grouper, block)
d2 = defaultdict(list)
for k, v in d.items():
d2[abs(hash(k)) % npartitions].extend(v)
p.append(d2)
return p
def collect(grouper, group, p, barrier_token):
""" Collect partitions from disk and yield k,v group pairs """
d = groupby(grouper, p.get(group, lock=False))
return list(d.items())
opens = {'gz': gzip.open, 'bz2': bz2.BZ2File}
def from_filenames(filenames, chunkbytes=None):
""" Create dask by loading in lines from many files
Provide list of filenames
>>> b = from_filenames(['myfile.1.txt', 'myfile.2.txt']) # doctest: +SKIP
Or a globstring
>>> b = from_filenames('myfiles.*.txt') # doctest: +SKIP
Parallelize a large files by providing the number of uncompressed bytes to
load into each partition.
>>> b = from_filenames('largefile.txt', chunkbytes=1e7) # doctest: +SKIP
See also:
from_sequence: A more generic bag creation function
"""
if isinstance(filenames, str):
filenames = sorted(glob(filenames))
if not filenames:
raise ValueError("No filenames found")
full_filenames = [os.path.abspath(f) for f in filenames]
name = 'from-filename' + next(tokens)
if chunkbytes:
chunkbytes = int(chunkbytes)
taskss = [_chunk_read_file(fn, chunkbytes) for fn in full_filenames]
d = dict(((name, i), task)
for i, task in enumerate(toolz.concat(taskss)))
else:
extension = os.path.splitext(filenames[0])[1].strip('.')
myopen = opens.get(extension, open)
d = dict(((name, i), (list, (myopen, fn)))
for i, fn in enumerate(full_filenames))
return Bag(d, name, len(d))
def _chunk_read_file(filename, chunkbytes):
    """ Build tasks that read ``filename`` in ``chunkbytes``-sized blocks of
    uncompressed text, each wrapped so it can be consumed as lines """
extension = os.path.splitext(filename)[1].strip('.')
compression = {'gz': 'gzip', 'bz2': 'bz2'}.get(extension, None)
return [(list, (StringIO, (bytes.decode,
(textblock, filename, i, i + chunkbytes, compression))))
for i in range(0, file_size(filename, compression), chunkbytes)]
def write(data, filename):
    """ Write an iterable of text lines to ``filename``, creating parent
    directories and gzip/bz2-compressing based on the file extension """
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
with ignoring(OSError):
os.makedirs(dirname)
ext = os.path.splitext(filename)[1][1:]
if ext == 'gz':
f = gzip.open(filename, 'wb')
data = (line.encode() for line in data)
elif ext == 'bz2':
f = bz2.BZ2File(filename, 'wb')
data = (line.encode() for line in data)
else:
f = open(filename, 'w')
try:
for item in data:
f.write(item)
finally:
f.close()
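# Illustrative sketch (the path below is hypothetical and the helper is not
# part of the dask API): ``write`` picks gzip or bz2 compression from the
# filename extension and creates missing parent directories before writing.
def _demo_write(path='/tmp/dask_bag_demo/out.txt.gz'):
    write((u'%d\n' % i for i in range(3)), path)
    with open(path, 'rb') as f:
        # read the compressed bytes back as a stream of lines
        lines = list(stream_decompress('gz', f.read()))
    assert len(lines) == 3
    return lines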
def _get_s3_bucket(bucket_name, aws_access_key, aws_secret_key, connection,
anon):
"""Connect to s3 and return a bucket"""
import boto
if anon is True:
connection = boto.connect_s3(anon=anon)
elif connection is None:
connection = boto.connect_s3(aws_access_key, aws_secret_key)
return connection.get_bucket(bucket_name)
# We need an unmemoized function to call in the main thread, and a memoized
# version for use inside the dask graph.
_memoized_get_bucket = toolz.memoize(_get_s3_bucket)
def _get_key(bucket_name, conn_args, key_name):
bucket = _memoized_get_bucket(bucket_name, *conn_args)
key = bucket.get_key(key_name)
ext = key_name.split('.')[-1]
return stream_decompress(ext, key.read())
def _parse_s3_URI(bucket_name, paths):
assert bucket_name.startswith('s3://')
o = urlparse('s3://' + quote(bucket_name[len('s3://'):]))
# if path is specified
if (paths == '*') and (o.path != '' and o.path != '/'):
paths = unquote(o.path[1:])
bucket_name = unquote(o.hostname)
return bucket_name, paths
def from_s3(bucket_name, paths='*', aws_access_key=None, aws_secret_key=None,
connection=None, anon=False):
""" Create a Bag by loading textfiles from s3
Each line will be treated as one element and each file in S3 as one
partition.
You may specify a full s3 bucket
>>> b = from_s3('s3://bucket-name') # doctest: +SKIP
Or select files, lists of files, or globstrings of files within that bucket
>>> b = from_s3('s3://bucket-name', 'myfile.json') # doctest: +SKIP
>>> b = from_s3('s3://bucket-name', ['alice.json', 'bob.json']) # doctest: +SKIP
>>> b = from_s3('s3://bucket-name', '*.json') # doctest: +SKIP
"""
conn_args = (aws_access_key, aws_secret_key, connection, anon)
bucket_name, paths = normalize_s3_names(bucket_name, paths, conn_args)
get_key = partial(_get_key, bucket_name, conn_args)
name = next(load_names)
dsk = dict(((name, i), (list, (get_key, k))) for i, k in enumerate(paths))
return Bag(dsk, name, len(paths))
def normalize_s3_names(bucket_name, paths, conn_args):
""" Normalize bucket name and paths """
if bucket_name.startswith('s3://'):
bucket_name, paths = _parse_s3_URI(bucket_name, paths)
if isinstance(paths, str):
if ('*' not in paths) and ('?' not in paths):
return bucket_name, [paths]
else:
bucket = _get_s3_bucket(bucket_name, *conn_args)
keys = bucket.list() # handle globs
matches = [k.name for k in keys if fnmatchcase(k.name, paths)]
return bucket_name, matches
else:
return bucket_name, paths
def from_hdfs(path, hdfs=None, host='localhost', port='50070', user_name=None):
""" Create dask by loading in files from HDFS
Provide an hdfs directory and credentials
>>> b = from_hdfs('home/username/data/', host='localhost', user_name='ubuntu') # doctest: +SKIP
Alternatively provide an instance of ``pywebhdfs.webhdfs.PyWebHdfsClient``
>>> from pywebhdfs.webhdfs import PyWebHdfsClient # doctest: +SKIP
>>> hdfs = PyWebHdfsClient(host='hostname', user_name='username') # doctest: +SKIP
>>> b = from_hdfs('home/username/data/', hdfs=hdfs) # doctest: +SKIP
"""
from .. import hdfs_utils
filenames = hdfs_utils.filenames(hdfs, path)
if not filenames:
raise ValueError("No files found for path %s" % path)
name = next(names)
dsk = dict()
for i, fn in enumerate(filenames):
ext = fn.split('.')[-1]
if ext in ('gz', 'bz2'):
dsk[(name, i)] = (stream_decompress, ext, (hdfs.read_file, fn))
else:
dsk[(name, i)] = (hdfs.read_file, fn)
return Bag(dsk, name, len(dsk))
def stream_decompress(fmt, data):
""" Decompress a block of compressed bytes into a stream of strings """
if fmt == 'gz':
return gzip.GzipFile(fileobj=BytesIO(data))
if fmt == 'bz2':
return bz2_stream(data)
else:
return map(bytes.decode, BytesIO(data))
def bz2_stream(compressed, chunksize=100000):
""" Stream lines from a chunk of compressed bz2 data """
decompressor = bz2.BZ2Decompressor()
for i in range(0, len(compressed), chunksize):
chunk = compressed[i: i+chunksize]
decompressed = decompressor.decompress(chunk).decode()
for line in decompressed.split('\n'):
yield line + '\n'
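# Minimal round-trip sketch (helper name is illustrative): compress a small
# payload and stream it back line by line through ``bz2_stream``.
def _demo_bz2_stream():
    compressed = bz2.compress(u'alpha\nbeta\ngamma\n'.encode())
    lines = list(bz2_stream(compressed))
    # the generator re-appends a newline to every split piece
    assert lines[:3] == [u'alpha\n', u'beta\n', u'gamma\n']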
def from_sequence(seq, partition_size=None, npartitions=None):
""" Create dask from Python sequence
This sequence should be relatively small in memory. Dask Bag works
best when it handles loading your data itself. Commonly we load a
sequence of filenames into a Bag and then use ``.map`` to open them.
Parameters
----------
seq: Iterable
A sequence of elements to put into the dask
partition_size: int (optional)
The length of each partition
npartitions: int (optional)
The number of desired partitions
It is best to provide either ``partition_size`` or ``npartitions``
(though not both).
Example
-------
>>> b = from_sequence(['Alice', 'Bob', 'Chuck'], partition_size=2)
See also:
from_filenames: Specialized bag creation function for textfiles
"""
seq = list(seq)
if npartitions and not partition_size:
partition_size = int(math.ceil(len(seq) / npartitions))
if npartitions is None and partition_size is None:
if len(seq) < 100:
partition_size = 1
else:
partition_size = int(len(seq) / 100)
parts = list(partition_all(partition_size, seq))
name = next(load_names)
d = dict(((name, i), part) for i, part in enumerate(parts))
return Bag(d, name, len(d))
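# Minimal sketch (helper name is illustrative, not part of the dask API):
# partition_size=2 over five elements yields three partitions.
def _demo_from_sequence_partitioning():
    b = from_sequence(range(5), partition_size=2)
    assert b.npartitions == 3
    return b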
def from_url(urls):
"""Create a dask.bag from a url
>>> a = from_url('http://raw.githubusercontent.com/ContinuumIO/dask/master/README.rst') # doctest: +SKIP
>>> a.npartitions # doctest: +SKIP
1
>> a.take(8) # doctest: +SKIP
('Dask\n',
'====\n',
'\n',
'|Build Status| |Coverage| |Doc Status| |Gitter|\n',
'\n',
'Dask provides multi-core execution on larger-than-memory datasets using blocked\n',
'algorithms and task scheduling. It maps high-level NumPy and list operations\n',
'on large datasets on to graphs of many operations on small in-memory datasets.\n')
>>> b = from_url(['http://github.com', 'http://google.com']) # doctest: +SKIP
>>> b.npartitions # doctest: +SKIP
2
"""
if isinstance(urls, str):
urls = [urls]
name = next(load_names)
dsk = {}
for i, u in enumerate(urls):
dsk[(name, i)] = (list, (urlopen, u))
return Bag(dsk, name, len(urls))
def dictitems(d):
""" A pickleable version of dict.items
>>> dictitems({'x': 1})
[('x', 1)]
"""
return list(d.items())
def concat(bags):
""" Concatenate many bags together, unioning all elements
>>> import dask.bag as db
>>> a = db.from_sequence([1, 2, 3])
>>> b = db.from_sequence([4, 5, 6])
>>> c = db.concat([a, b])
>>> list(c)
[1, 2, 3, 4, 5, 6]
"""
name = next(names)
counter = itertools.count(0)
dsk = dict(((name, next(counter)), key) for bag in bags
for key in sorted(bag._keys()))
return Bag(merge(dsk, *[b.dask for b in bags]), name, len(dsk))
class StringAccessor(object):
""" String processing functions
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.lower())
['alice smith', 'bob jones', 'charlie smith']
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
>>> list(b.str.split(' '))
[['Alice', 'Smith'], ['Bob', 'Jones'], ['Charlie', 'Smith']]
"""
def __init__(self, bag):
self._bag = bag
def __dir__(self):
return sorted(set(dir(type(self)) + dir(str)))
def _strmap(self, key, *args, **kwargs):
return self._bag.map(lambda s: getattr(s, key)(*args, **kwargs))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
if key in dir(str):
func = getattr(str, key)
return robust_wraps(func)(partial(self._strmap, key))
else:
raise
def match(self, pattern):
""" Filter strings by those that match a pattern
Example
-------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
See Also
--------
fnmatch.fnmatch
"""
from fnmatch import fnmatch
return self._bag.filter(partial(fnmatch, pat=pattern))
def robust_wraps(wrapper):
""" A weak version of wraps that only copies doc """
def _(wrapped):
wrapped.__doc__ = wrapper.__doc__
return wrapped
return _
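# Illustrative sketch (helper name is hypothetical): ``robust_wraps`` copies
# only the docstring, unlike functools.wraps, which also copies __name__.
def _demo_robust_wraps():
    @robust_wraps(str.upper)
    def shout(s):
        return s.upper()
    assert shout.__doc__ == str.upper.__doc__
    assert shout('abc') == 'ABC'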
def reify(seq):
    """ Concretize iterators into lists (one level deep) so that the result
    can be iterated over more than once """
if isinstance(seq, Iterator):
seq = list(seq)
if seq and isinstance(seq[0], Iterator):
seq = list(map(list, seq))
return seq
| bsd-3-clause |
ky822/scikit-learn | sklearn/utils/tests/test_class_weight.py | 140 | 11909 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
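# Illustrative sketch (not part of the test suite; the helper name is
# hypothetical): the "balanced" heuristic follows
# n_samples / (n_classes * np.bincount(y_indices)), recomputed by hand below.
def _demo_balanced_formula():
    y = np.asarray([2, 2, 2, 3, 3, 4])
    classes = np.unique(y)
    counts = np.bincount(y)[classes]
    expected = y.shape[0] / (len(classes) * counts.astype(float))
    assert_array_almost_equal(compute_class_weight("balanced", classes, y),
                              expected)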
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
adiIspas/Machine-Learning_A-Z | Machine Learning A-Z/Part 3 - Classification/Section 15 - K-Nearest Neighbors (K-NN)/classification_template.py | 37 | 2538 | # Classification template
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Fitting classifier to the Training set
# Create your classifier here
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
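# (Rows of ``cm`` are the true classes and columns the predicted classes, so
# the diagonal counts the correct predictions.)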
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Classifier (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Classifier (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show() | mit |
glennq/scikit-learn | examples/gaussian_process/plot_compare_gpr_krr.py | 67 | 5191 | """
==========================================================
Comparison of kernel ridge and Gaussian process regression
==========================================================
Both kernel ridge regression (KRR) and Gaussian process regression (GPR) learn
a target function by employing internally the "kernel trick". KRR learns a
linear function in the space induced by the respective kernel which corresponds
to a non-linear function in the original space. The linear function in the
kernel space is chosen based on the mean-squared error loss with
ridge regularization. GPR uses the kernel to define the covariance of
a prior distribution over the target functions and uses the observed training
data to define a likelihood function. Based on Bayes theorem, a (Gaussian)
posterior distribution over target functions is defined, whose mean is used
for prediction.
A major difference is that GPR can choose the kernel's hyperparameters based
on gradient-ascent on the marginal likelihood function while KRR needs to
perform a grid search on a cross-validated loss function (mean-squared error
loss). A further difference is that GPR learns a generative, probabilistic
model of the target function and can thus provide meaningful confidence
intervals and posterior samples along with the predictions while KRR only
provides predictions.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise. The figure compares
the learned model of KRR and GPR based on an ExpSineSquared kernel, which is
suited for learning periodic functions. The kernel's hyperparameters control
the smoothness (l) and periodicity of the kernel (p). Moreover, the noise level
of the data is learned explicitly by GPR by an additional WhiteKernel component
in the kernel and by the regularization parameter alpha of KRR.
The figure shows that both methods learn reasonable models of the target
function. GPR correctly identifies the periodicity of the function to be
roughly 2*pi (6.28), while KRR chooses the doubled periodicity 4*pi. Besides
that, GPR provides reasonable confidence bounds on the prediction which are not
available for KRR. A major difference between the two methods is the time
required for fitting and predicting: while fitting KRR is fast in principle,
the grid-search for hyperparameter optimization scales exponentially with the
number of hyperparameters ("curse of dimensionality"). The gradient-based
optimization of the parameters in GPR does not suffer from this exponential
scaling and is thus considerably faster on this example with 3-dimensional
hyperparameter space. The time for predicting is similar; however, generating
the variance of the predictive distribution of GPR takes considerably longer
than just predicting the mean.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import WhiteKernel, ExpSineSquared
rng = np.random.RandomState(0)
# Generate sample data
X = 15 * rng.rand(100, 1)
y = np.sin(X).ravel()
y += 3 * (0.5 - rng.rand(X.shape[0])) # add noise
# Fit KernelRidge with parameter selection based on 5-fold cross validation
param_grid = {"alpha": [1e0, 1e-1, 1e-2, 1e-3],
"kernel": [ExpSineSquared(l, p)
for l in np.logspace(-2, 2, 10)
for p in np.logspace(0, 2, 10)]}
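# (The positional arguments of ExpSineSquared are length_scale and periodicity.)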
kr = GridSearchCV(KernelRidge(), cv=5, param_grid=param_grid)
stime = time.time()
kr.fit(X, y)
print("Time for KRR fitting: %.3f" % (time.time() - stime))
gp_kernel = ExpSineSquared(1.0, 5.0, periodicity_bounds=(1e-2, 1e1)) \
+ WhiteKernel(1e-1)
gpr = GaussianProcessRegressor(kernel=gp_kernel)
stime = time.time()
gpr.fit(X, y)
print("Time for GPR fitting: %.3f" % (time.time() - stime))
# Predict using kernel ridge
X_plot = np.linspace(0, 20, 10000)[:, None]
stime = time.time()
y_kr = kr.predict(X_plot)
print("Time for KRR prediction: %.3f" % (time.time() - stime))
# Predict using kernel ridge
stime = time.time()
y_gpr = gpr.predict(X_plot, return_std=False)
print("Time for GPR prediction: %.3f" % (time.time() - stime))
stime = time.time()
y_gpr, y_std = gpr.predict(X_plot, return_std=True)
print("Time for GPR prediction with standard-deviation: %.3f"
% (time.time() - stime))
# Plot results
plt.figure(figsize=(10, 5))
lw = 2
plt.scatter(X, y, c='k', label='data')
plt.plot(X_plot, np.sin(X_plot), color='navy', lw=lw, label='True')
plt.plot(X_plot, y_kr, color='turquoise', lw=lw,
label='KRR (%s)' % kr.best_params_)
plt.plot(X_plot, y_gpr, color='darkorange', lw=lw,
label='GPR (%s)' % gpr.kernel_)
plt.fill_between(X_plot[:, 0], y_gpr - y_std, y_gpr + y_std, color='darkorange',
alpha=0.2)
plt.xlabel('data')
plt.ylabel('target')
plt.xlim(0, 20)
plt.ylim(-4, 4)
plt.title('GPR versus Kernel Ridge')
plt.legend(loc="best", scatterpoints=1, prop={'size': 8})
plt.show()
| bsd-3-clause |
yanlend/scikit-learn | examples/model_selection/plot_learning_curve.py | 250 | 4171 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.learning_curve import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
    Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=100,
test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=10,
test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
| bsd-3-clause |
elkingtonmcb/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
khiner/aubio | python/demos/demo_tempo_plot.py | 10 | 2520 | #! /usr/bin/env python
import sys
from aubio import tempo, source
win_s = 512 # fft size
hop_s = win_s / 2 # hop size
if len(sys.argv) < 2:
print "Usage: %s <filename> [samplerate]" % sys.argv[0]
sys.exit(1)
filename = sys.argv[1]
samplerate = 0
if len( sys.argv ) > 2: samplerate = int(sys.argv[2])
s = source(filename, samplerate, hop_s)
samplerate = s.samplerate
o = tempo("default", win_s, hop_s, samplerate)
# tempo detection delay, in samples
# default to 4 blocks delay to catch up with
delay = 4. * hop_s
# list of beats, in samples
beats = []
# total number of frames read
total_frames = 0
while True:
samples, read = s()
is_beat = o(samples)
if is_beat:
this_beat = o.get_last_s()
beats.append(this_beat)
total_frames += read
if read < hop_s: break
if len(beats) > 1:
# do plotting
from numpy import array, arange, mean, median, diff
import matplotlib.pyplot as plt
bpms = 60./ diff(beats)
print 'mean period:', "%.2f" % mean(bpms), 'bpm', 'median', "%.2f" % median(bpms), 'bpm'
print 'plotting', filename
plt1 = plt.axes([0.1, 0.75, 0.8, 0.19])
plt2 = plt.axes([0.1, 0.1, 0.8, 0.65], sharex = plt1)
plt.rc('lines',linewidth='.8')
for stamp in beats: plt1.plot([stamp, stamp], [-1., 1.], '-r')
plt1.axis(xmin = 0., xmax = total_frames / float(samplerate) )
plt1.xaxis.set_visible(False)
plt1.yaxis.set_visible(False)
# plot actual periods
plt2.plot(beats[1:], bpms, '-', label = 'raw')
# plot moving median of 5 last periods
median_win_s = 5
bpms_median = [ median(bpms[i:i + median_win_s:1]) for i in range(len(bpms) - median_win_s ) ]
plt2.plot(beats[median_win_s+1:], bpms_median, '-', label = 'median of %d' % median_win_s)
    # plot moving median of 20 last periods
median_win_s = 20
bpms_median = [ median(bpms[i:i + median_win_s:1]) for i in range(len(bpms) - median_win_s ) ]
plt2.plot(beats[median_win_s+1:], bpms_median, '-', label = 'median of %d' % median_win_s)
plt2.axis(ymin = min(bpms), ymax = max(bpms))
#plt2.axis(ymin = 40, ymax = 240)
plt.xlabel('time (mm:ss)')
plt.ylabel('beats per minute (bpm)')
plt2.set_xticklabels([ "%02d:%02d" % (t/60, t%60) for t in plt2.get_xticks()[:-1]], rotation = 50)
#plt.savefig('/tmp/t.png', dpi=200)
plt2.legend()
plt.show()
else:
print 'mean period:', "%.2f" % 0, 'bpm', 'median', "%.2f" % 0, 'bpm',
print 'nothing to plot, file too short?'
| gpl-3.0 |
ywcui1990/htmresearch | htmresearch/frameworks/layers/l2456_model.py | 3 | 22781 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This class allows to easily create experiments using a L2456 network for
inference over objects. It uses the network API and multiple regions (raw
sensors for sensor and external input, column pooler region, extended temporal
memory region).
Here is a sample use of this class, to learn objects and infer one of them. The
object creation details are TBD.
exp = L2456Model(
name="sample",
numCorticalColumns=2,
)
# Set up objects (TBD)
objects = createObjectMachine()
# Do the learning phase
exp.learnObjects(objects, reset=True)
exp.printProfile()
# Do the inference phase for one object
exp.infer(objects[0], reset=True)
exp.plotInferenceStats(
fields=["L2 Representation",
"Overlap L2 with object",
"L4 Representation"],
plotDir="plots",
)
"""
import os
import random
import collections
import inspect
import cPickle
import matplotlib.pyplot as plt
from tabulate import tabulate
from htmresearch.support.logging_decorator import LoggingDecorator
from htmresearch.support.register_regions import registerAllResearchRegions
from htmresearch.frameworks.layers.laminar_network import createNetwork
def rerunExperimentFromLogfile(logFilename):
"""
Create an experiment class according to the sequence of operations in logFile
and return resulting experiment instance.
"""
callLog = LoggingDecorator.load(logFilename)
# Assume first one is call to constructor
exp = L2456Model(*callLog[0][1]["args"], **callLog[0][1]["kwargs"])
# Call subsequent methods, using stored parameters
for call in callLog[1:]:
method = getattr(exp, call[0])
method(*call[1]["args"], **call[1]["kwargs"])
return exp
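# Illustrative usage sketch (the log filename below is hypothetical):
#
#     exp = rerunExperimentFromLogfile("l2456_sample.log")
#     exp.getInferenceStats()
#
# Replaying only works if the original experiment was created with
# logCalls=True, so that every decorated public call was recorded.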
class L2456Model(object):
"""
L2456 experiment.
This experiment uses the network API to test out various properties of
inference and learning using a sensors and an L4-L2 network. For now,
we directly use the locations on the object.
"""
@LoggingDecorator()
def __init__(self,
name,
numCorticalColumns=1,
L2Overrides={},
L4Overrides={},
L5Overrides={},
L6Overrides={},
numLearningPoints=3,
seed=42,
logCalls = False
):
"""
Creates the network.
Parameters:
----------------------------
@param name (str)
Experiment name
@param numCorticalColumns (int)
Number of cortical columns in the network
@param L2Overrides (dict)
Parameters to override in the L2 region
@param L4Overrides (dict)
Parameters to override in the L4 region
@param L5Overrides (dict)
Parameters to override in the L5 region
@param L6Overrides (dict)
Parameters to override in the L6 region
@param numLearningPoints (int)
Number of times each pair should be seen to be learnt
@param logCalls (bool)
If true, calls to main functions will be logged internally. The
log can then be saved with saveLogs(). This allows us to recreate
the complete network behavior using rerunExperimentFromLogfile
which is very useful for debugging.
"""
# Handle logging - this has to be done first
self.logCalls = logCalls
registerAllResearchRegions()
self.name = name
self.numLearningPoints = numLearningPoints
self.numColumns = numCorticalColumns
self.sensorInputSize = 2048
self.numInputBits = 40
# seed
self.seed = seed
random.seed(seed)
# Get network parameters and update with overrides
self.config = {
"networkType": "L2456Columns",
"numCorticalColumns": numCorticalColumns,
"randomSeedBase": self.seed,
}
self.config.update(self.getDefaultParams())
self.config["L2Params"].update(L2Overrides)
self.config["L4Params"].update(L4Overrides)
self.config["L5Params"].update(L5Overrides)
self.config["L6Params"].update(L6Overrides)
# create network and retrieve regions
self.network = createNetwork(self.config)
self._retrieveRegions()
# will be populated during training
self.objectRepresentationsL2 = {}
self.objectRepresentationsL5 = {}
self.statistics = []
@LoggingDecorator()
def learnObjects(self, objects, reset=True):
"""
Learns all provided objects, and optionally resets the network.
The provided objects must have the canonical learning format, which is the
following.
objects should be a dict objectName: sensationList, where the sensationList
is a list of sensations, and each sensation is a mapping from cortical
column to a tuple of three SDR's respectively corresponding to the
locationInput, the coarseSensorInput, and the sensorInput.
The model presents each sensation for numLearningPoints iterations before
moving on to the next sensation. Once the network has been trained on an
object, the L2 and L5 representations for it are stored. A reset signal is
sent whenever there is a new object if reset=True.
An example input is as follows, assuming we are learning a simple object
with a sequence of two sensations (with very few active bits for
simplicity):
objects = {
"simple": [
{
# location, coarse feature, fine feature for CC0, sensation 1
0: ( [1, 5, 10], [9, 32, 75], [6, 12, 52] ),
# location, coarse feature, fine feature for CC1, sensation 1
1: ( [6, 2, 15], [11, 42, 92], [7, 11, 50] ),
},
{
# location, coarse feature, fine feature for CC0, sensation 2
0: ( [2, 9, 10], [10, 35, 78], [6, 12, 52] ),
# location, coarse feature, fine feature for CC1, sensation 2
1: ( [1, 4, 12], [10, 32, 52], [6, 10, 52] ),
},
]
}
    In many use cases, this object can be created by implementations of
ObjectMachines (cf htm.research.object_machine_factory), through their
method providedObjectsToLearn.
Parameters:
----------------------------
@param objects (dict)
Objects to learn, in the canonical format specified above
@param reset (bool)
If set to True (which is the default value), the network will
be reset after learning.
"""
self._setLearningMode()
for objectName, sensationList in objects.iteritems():
# ignore empty sensation lists
if len(sensationList) == 0:
continue
# keep track of numbers of iterations to run
iterations = 0
for sensations in sensationList:
# learn each pattern multiple times
for _ in xrange(self.numLearningPoints):
for col in xrange(self.numColumns):
location, coarseFeature, fineFeature = sensations[col]
self.locationInputs[col].addDataToQueue(list(location), 0, 0)
self.coarseSensors[col].addDataToQueue(list(coarseFeature), 0, 0)
self.sensors[col].addDataToQueue(list(fineFeature), 0, 0)
iterations += 1
# actually learn the objects
if iterations > 0:
self.network.run(iterations)
# update L2 and L5 representations for this object
self.objectRepresentationsL2[objectName] = self.getL2Representations()
self.objectRepresentationsL5[objectName] = self.getL5Representations()
if reset:
# send reset signal
self._sendReset()
@LoggingDecorator()
def infer(self, sensationList, reset=True, objectName=None):
"""
Infer on a given set of sensations for a single object.
The provided sensationList is a list of sensations, and each sensation is a
mapping from cortical column to a tuple of three SDR's respectively
corresponding to the locationInput, the coarseSensorInput, and the
sensorInput.
For example, the input can look as follows, if we are inferring a simple
object with two sensations (with very few active bits for simplicity):
sensationList = [
{
# location, coarse feature, fine feature for CC0, sensation 1
0: ( [1, 5, 10], [9, 32, 75], [6, 12, 52] ),
# location, coarse feature, fine feature for CC1, sensation 1
1: ( [6, 2, 15], [11, 42, 92], [7, 11, 50] ),
},
{
# location, coarse feature, fine feature for CC0, sensation 2
0: ( [2, 9, 10], [10, 35, 78], [6, 12, 52] ),
# location, coarse feature, fine feature for CC1, sensation 2
1: ( [1, 4, 12], [10, 32, 52], [6, 10, 52] ),
},
]
If the object is known by the caller, an object name can be specified
as an optional argument, and must match the objects given while learning.
This is used later when evaluating inference statistics.
Parameters:
----------------------------
@param objects (dict)
Objects to learn, in the canonical format specified above
@param reset (bool)
If set to True (which is the default value), the network will
be reset after learning.
@param objectName (str)
Name of the objects (must match the names given during learning).
"""
self._unsetLearningMode()
statistics = collections.defaultdict(list)
if objectName is not None:
if objectName not in self.objectRepresentationsL2:
raise ValueError("The provided objectName was not given during"
" learning")
for sensations in sensationList:
# feed all columns with sensations
for col in xrange(self.numColumns):
location, coarseFeature, fineFeature = sensations[col]
self.locationInputs[col].addDataToQueue(list(location), 0, 0)
self.coarseSensors[col].addDataToQueue(list(coarseFeature), 0, 0)
self.sensors[col].addDataToQueue(list(fineFeature), 0, 0)
self.network.run(1)
self._updateInferenceStats(statistics, objectName)
if reset:
# send reset signal
self._sendReset()
# save statistics
statistics["numSteps"] = len(sensationList)
statistics["object"] = objectName if objectName is not None else "Unknown"
self.statistics.append(statistics)
@LoggingDecorator()
def sendReset(self, *args, **kwargs):
"""
    Public interface to send a reset signal to the network. This call is logged.
"""
self._sendReset(*args, **kwargs)
def _sendReset(self, sequenceId=0):
"""
Sends a reset signal to the network.
"""
# Handle logging - this has to be done first
if self.logCalls:
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
values.pop('frame')
values.pop('self')
(_, filename,
_, _, _, _) = inspect.getouterframes(inspect.currentframe())[1]
if os.path.splitext(os.path.basename(__file__))[0] != \
os.path.splitext(os.path.basename(filename))[0]:
self.callLog.append([inspect.getframeinfo(frame)[2], values])
for col in xrange(self.numColumns):
self.locationInputs[col].addResetToQueue(sequenceId)
self.coarseSensors[col].addResetToQueue(sequenceId)
self.sensors[col].addResetToQueue(sequenceId)
self.network.run(1)
def plotInferenceStats(self,
fields,
plotDir="plots",
experimentID=0,
onePlot=True):
"""
Plots and saves the desired inference statistics.
Parameters:
----------------------------
@param fields (list(str))
List of fields to include in the plots
@param experimentID (int)
ID of the experiment (usually 0 if only one was conducted)
@param onePlot (bool)
If true, all cortical columns will be merged in one plot.
"""
# TODO: implement it once learning and inference are working
raise RuntimeError("Unimplemented method")
def getInferenceStats(self, experimentID=None):
"""
Returns the statistics for the desired experiment. If experimentID is None
return all statistics.
Parameters:
----------------------------
@param experimentID (int)
Each time you call infer() you get a new set of inference
statistics. experimentID refers to which call you want stats for
(usually 0 if only one was conducted).
"""
if experimentID is None:
return self.statistics
else:
return self.statistics[experimentID]
def printProfile(self, reset=False):
"""
Prints profiling information.
Parameters:
----------------------------
@param reset (bool)
If set to True, the profiling will be reset.
"""
print "Profiling information for {}".format(type(self).__name__)
totalTime = 0.000001
for region in self.network.regions.values():
timer = region.getComputeTimer()
totalTime += timer.getElapsed()
# Sort the region names
regionNames = list(self.network.regions.keys())
regionNames.sort()
count = 1
profileInfo = []
L2Time = 0.0
L4Time = 0.0
for regionName in regionNames:
region = self.network.regions[regionName]
timer = region.getComputeTimer()
count = max(timer.getStartCount(), count)
profileInfo.append([region.name,
timer.getStartCount(),
timer.getElapsed(),
100.0 * timer.getElapsed() / totalTime,
timer.getElapsed() / max(timer.getStartCount(), 1)])
if "L2Column" in regionName:
L2Time += timer.getElapsed()
elif "L4Column" in regionName:
L4Time += timer.getElapsed()
profileInfo.append(
["Total time", "", totalTime, "100.0", totalTime / count])
print tabulate(profileInfo, headers=["Region", "Count",
"Elapsed", "Pct of total",
"Secs/iteration"],
tablefmt="grid", floatfmt="6.3f")
print
print "Total time in L2 =", L2Time
print "Total time in L4 =", L4Time
if reset:
self.resetProfile()
def resetProfile(self):
"""
Resets the network profiling.
"""
self.network.resetProfiling()
def getL4Representations(self):
"""
Returns the active representation in L4.
"""
return [set(column._tm.getActiveCells()) for column in self.L4Columns]
def getL4PredictedCells(self):
"""
Returns the predicted cells in L4.
"""
return [set(column._tm.getPredictedCells()) for column in self.L4Columns]
def getL2Representations(self):
"""
Returns a list of active cells in L2 for each column.
"""
return [set(column._pooler.getActiveCells()) for column in self.L2Columns]
def getL5Representations(self):
"""
Returns a list of active cells in L5 for each column.
"""
return [set(column._pooler.getActiveCells()) for column in self.L5Columns]
def getL6Representations(self):
"""
    Returns the active representation in L6.
"""
return [set(column._tm.getActiveCells()) for column in self.L6Columns]
def getL6PredictedCells(self):
"""
    Returns the predicted cells in L6.
"""
return [set(column._tm.getPredictedCells()) for column in self.L6Columns]
def getDefaultParams(self):
"""
Returns a good default set of parameters to use in L2456 regions
"""
return {
"sensorParams": {
"outputWidth": self.sensorInputSize,
},
"coarseSensorParams": {
"outputWidth": self.sensorInputSize,
},
"locationParams": {
"activeBits": 41,
"outputWidth": self.sensorInputSize,
"radius": 2,
"verbosity": 0,
},
"L4Params": {
"columnCount": self.sensorInputSize,
"cellsPerColumn": 8,
"learn": True,
"learnOnOneCell": False,
"initialPermanence": 0.51,
"connectedPermanence": 0.6,
"permanenceIncrement": 0.1,
"permanenceDecrement": 0.02,
"minThreshold": 10,
"basalPredictedSegmentDecrement": 0.002,
"activationThreshold": 13,
"sampleSize": 20,
"implementation": "ApicalTiebreakCPP",
},
"L2Params": {
"inputWidth": self.sensorInputSize * 8,
"cellCount": 4096,
"sdrSize": 40,
"synPermProximalInc": 0.1,
"synPermProximalDec": 0.001,
"initialProximalPermanence": 0.6,
"minThresholdProximal": 10,
"sampleSizeProximal": 20,
"connectedPermanenceProximal": 0.5,
"synPermDistalInc": 0.1,
"synPermDistalDec": 0.001,
"initialDistalPermanence": 0.41,
"activationThresholdDistal": 13,
"sampleSizeDistal": 20,
"connectedPermanenceDistal": 0.5,
"distalSegmentInhibitionFactor": 0.6667,
"learningMode": True,
},
"L6Params": {
"columnCount": self.sensorInputSize,
"cellsPerColumn": 8,
"learn": True,
"learnOnOneCell": False,
"initialPermanence": 0.51,
"connectedPermanence": 0.6,
"permanenceIncrement": 0.1,
"permanenceDecrement": 0.02,
"minThreshold": 10,
"basalPredictedSegmentDecrement": 0.004,
"activationThreshold": 13,
"sampleSize": 20,
},
"L5Params": {
"inputWidth": self.sensorInputSize * 8,
"cellCount": 4096,
"sdrSize": 40,
"synPermProximalInc": 0.1,
"synPermProximalDec": 0.001,
"initialProximalPermanence": 0.6,
"minThresholdProximal": 10,
"sampleSizeProximal": 20,
"connectedPermanenceProximal": 0.5,
"synPermDistalInc": 0.1,
"synPermDistalDec": 0.001,
"initialDistalPermanence": 0.41,
"activationThresholdDistal": 13,
"sampleSizeDistal": 20,
"connectedPermanenceDistal": 0.5,
"distalSegmentInhibitionFactor": 0.6667,
"learningMode": True,
},
}
def _retrieveRegions(self):
"""
Retrieve and store Python region instances for each column
"""
self.sensors = []
self.coarseSensors = []
self.locationInputs = []
self.L4Columns = []
self.L2Columns = []
self.L5Columns = []
self.L6Columns = []
for i in xrange(self.numColumns):
self.sensors.append(
self.network.regions["sensorInput_" + str(i)].getSelf()
)
self.coarseSensors.append(
self.network.regions["coarseSensorInput_" + str(i)].getSelf()
)
self.locationInputs.append(
self.network.regions["locationInput_" + str(i)].getSelf()
)
self.L4Columns.append(
self.network.regions["L4Column_" + str(i)].getSelf()
)
self.L2Columns.append(
self.network.regions["L2Column_" + str(i)].getSelf()
)
self.L5Columns.append(
self.network.regions["L5Column_" + str(i)].getSelf()
)
self.L6Columns.append(
self.network.regions["L6Column_" + str(i)].getSelf()
)
def _unsetLearningMode(self):
"""
Unsets the learning mode, to start inference.
"""
for column in self.L4Columns:
column.setParameter("learn", 0, False)
for column in self.L6Columns:
column.setParameter("learn", 0, False)
for column in self.L2Columns:
column.setParameter("learningMode", 0, False)
for column in self.L5Columns:
column.setParameter("learningMode", 0, False)
def _setLearningMode(self):
"""
Sets the learning mode.
"""
for column in self.L4Columns:
column.setParameter("learn", 0, True)
for column in self.L6Columns:
column.setParameter("learn", 0, True)
for column in self.L2Columns:
column.setParameter("learningMode", 0, True)
for column in self.L5Columns:
column.setParameter("learningMode", 0, True)
def _updateInferenceStats(self, statistics, objectName=None):
"""
Updates the inference statistics.
Parameters:
----------------------------
@param statistics (dict)
Dictionary in which to write the statistics
@param objectName (str)
Name of the inferred object, if known. Otherwise, set to None.
"""
L4Representations = self.getL4Representations()
L4PredictedCells = self.getL4PredictedCells()
L2Representations = self.getL2Representations()
L5Representations = self.getL5Representations()
L6Representations = self.getL6Representations()
L6PredictedCells = self.getL6PredictedCells()
for i in xrange(self.numColumns):
statistics["L4 Representation C" + str(i)].append(
len(L4Representations[i])
)
statistics["L4 Predicted C" + str(i)].append(
len(L4PredictedCells[i])
)
statistics["L2 Representation C" + str(i)].append(
len(L2Representations[i])
)
statistics["L6 Representation C" + str(i)].append(
len(L6Representations[i])
)
statistics["L6 Predicted C" + str(i)].append(
len(L6PredictedCells[i])
)
statistics["L5 Representation C" + str(i)].append(
len(L5Representations[i])
)
# add true overlap if objectName was provided
if objectName is not None:
objectRepresentationL2 = self.objectRepresentationsL2[objectName]
statistics["Overlap L2 with object C" + str(i)].append(
len(objectRepresentationL2[i] & L2Representations[i])
)
objectRepresentationL5 = self.objectRepresentationsL5[objectName]
statistics["Overlap L5 with object C" + str(i)].append(
len(objectRepresentationL5[i] & L5Representations[i])
)
| agpl-3.0 |
Kebniss/TalkingData-Mobile-User-Demographics | src/models/randomforest.py | 1 | 2720 | """ This script loads the dense training data, encodes the target labels and
trains a random forest model using CV. The best estimator is saved"""
''' THIS MODEL WITH MY FEATURES OBTAINS A SCORE OF 2.38088 ON KAGGLE'''
import os
import pickle
import numpy as np
import pandas as pd
from os import path
import seaborn as sns
from time import time
from scipy import sparse, io
import matplotlib.pyplot as plt
from sklearn.metrics import log_loss
from dotenv import load_dotenv, find_dotenv
from scipy.sparse import csr_matrix, hstack
from sklearn.preprocessing import LabelEncoder
from sklearn.grid_search import RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import train_test_split
from sklearn.metrics import make_scorer, f1_score, confusion_matrix
from sklearn.calibration import calibration_curve, CalibratedClassifierCV
dotenv_path = find_dotenv()
load_dotenv(dotenv_path)
RAW_DATA_DIR = os.environ.get("RAW_DATA_DIR")
FEATURES_DATA_DIR = os.environ.get("FEATURES_DIR")
MODELS_DIR = os.environ.get("MODELS_DIR")
data = pd.read_csv(path.join(FEATURES_DATA_DIR, 'dense_train_p_al_d.csv'))
gatrain = pd.read_csv(os.path.join(RAW_DATA_DIR,'gender_age_train.csv'),
index_col='device_id')
labels = gatrain['group']
targetencoder = LabelEncoder().fit(labels) # encoding target labels
labels = targetencoder.transform(labels)
nclasses = len(targetencoder.classes_)
with open(path.join(FEATURES_DATA_DIR, 'targetencoder_rf.pkl'), 'wb') as f:
pickle.dump(targetencoder, f) # saving the labels to unpack after prediction
X, X_calibration, y, y_calibration = train_test_split(data,
labels,
test_size=0.20,
random_state=0)
parameters = {'max_depth': (3, 4, 5, 6, 7, 8, 9, 10, 11),
'min_samples_split': (50, 100, 500, 1000),
'max_features': (30, 50, 100, 150, 200, 300, 500)}
f1_scorer = make_scorer(f1_score, greater_is_better=True, average='weighted')
rfc = RandomForestClassifier(n_estimators=200, n_jobs=4)
clf = RandomizedSearchCV(rfc, # select the best hyperparameters
parameters,
n_jobs=4,
n_iter=40,
random_state=42,
scoring=f1_scorer)
clf.fit(X, y)
print(clf.best_params_)  # report the selected hyperparameters
# calibrating the model
sig_clf = CalibratedClassifierCV(clf.best_estimator_, method='sigmoid', cv='prefit')
sig_clf.fit(X_calibration, y_calibration)
with open(path.join(MODELS_DIR, 'rfc_500.pkl'), 'wb') as f:
pickle.dump(sig_clf, f)
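# Usage sketch (illustrative only): how the artefacts saved above could be
# reused at prediction time. The function is defined but not called, and the
# test-feature path passed to it is hypothetical; the file must contain the
# same feature columns as 'dense_train_p_al_d.csv'.
def _demo_predict(test_features_path):
    with open(path.join(MODELS_DIR, 'rfc_500.pkl'), 'rb') as f:
        model = pickle.load(f)
    with open(path.join(FEATURES_DATA_DIR, 'targetencoder_rf.pkl'), 'rb') as f:
        encoder = pickle.load(f)
    test = pd.read_csv(test_features_path)
    probs = model.predict_proba(test)  # one column of probabilities per class
    return pd.DataFrame(probs, columns=encoder.classes_)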
| mit |
ankurankan/scikit-learn | sklearn/cross_validation.py | 1 | 62902 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import _is_arraylike, _num_samples, check_array
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
__all__ = ['Bootstrap',
'KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n, indices=None):
if indices is None:
indices = True
else:
warnings.warn("The indices parameter is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
self._indices = indices
@property
def indices(self):
warnings.warn("The indices attribute is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
return self._indices
def __iter__(self):
indices = self._indices
if indices:
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
if indices:
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p, indices=None):
super(LeavePOut, self).__init__(n, indices)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, indices, shuffle, random_state):
super(_BaseKFold, self).__init__(n, indices)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, indices=None, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, indices, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size trunc(n_samples / n_folds); the last one contains
    the remainder.
"""
def __init__(self, y, n_folds=3, indices=None, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, indices, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = np.bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels, indices=None):
super(LeaveOneLabelOut, self).__init__(len(labels), indices)
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p, indices=None):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels), indices)
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class Bootstrap(object):
"""Random sampling with replacement cross-validation iterator
Provides train/test indices to split data in train test sets
while resampling the input n_iter times: each time a new
random split of the data is performed and then samples are drawn
(with replacement) on each side of the split to build the training
and test sets.
Note: contrary to other cross-validation strategies, bootstrapping
    will allow some samples to occur several times in each split. However,
    a sample that occurs in the train split will never occur in the test
split and vice-versa.
If you want each sample to occur at most once you should probably
use ShuffleSplit cross validation instead.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default is 3)
Number of bootstrapping iterations
train_size : int or float (default is 0.5)
If int, number of samples to include in the training split
(should be smaller than the total number of samples passed
in the dataset).
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split.
test_size : int or float or None (default is None)
        If int, number of samples to include in the test set
(should be smaller than the total number of samples passed
in the dataset).
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split.
If None, n_test is set as the complement of n_train.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> bs = cross_validation.Bootstrap(9, random_state=0)
>>> len(bs)
3
>>> print(bs)
Bootstrap(9, n_iter=3, train_size=5, test_size=4, random_state=0)
>>> for train_index, test_index in bs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [1 8 7 7 8] TEST: [0 3 0 5]
TRAIN: [5 4 2 4 2] TEST: [6 7 1 0]
TRAIN: [4 7 0 1 1] TEST: [5 3 6 5]
See also
--------
ShuffleSplit: cross validation using random permutations.
"""
# Static marker to be able to introspect the CV type
indices = True
def __init__(self, n, n_iter=3, train_size=.5, test_size=None,
random_state=None, n_bootstraps=None):
# See, e.g., http://youtu.be/BzHz0J9a6k0?t=9m38s for a motivation
# behind this deprecation
warnings.warn("Bootstrap will no longer be supported as a " +
"cross-validation method as of version 0.15 and " +
"will be removed in 0.17", DeprecationWarning)
self.n = n
if n_bootstraps is not None: # pragma: no cover
warnings.warn("n_bootstraps was renamed to n_iter and will "
"be removed in 0.16.", DeprecationWarning)
n_iter = n_bootstraps
self.n_iter = n_iter
if (isinstance(train_size, numbers.Real) and train_size >= 0.0
and train_size <= 1.0):
self.train_size = int(ceil(train_size * n))
elif isinstance(train_size, numbers.Integral):
self.train_size = train_size
else:
raise ValueError("Invalid value for train_size: %r" %
train_size)
if self.train_size > n:
raise ValueError("train_size=%d should not be larger than n=%d" %
(self.train_size, n))
if isinstance(test_size, numbers.Real) and 0.0 <= test_size <= 1.0:
self.test_size = int(ceil(test_size * n))
elif isinstance(test_size, numbers.Integral):
self.test_size = test_size
elif test_size is None:
self.test_size = self.n - self.train_size
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if self.test_size > n - self.train_size:
raise ValueError(("test_size + train_size=%d, should not be " +
"larger than n=%d") %
(self.test_size + self.train_size, n))
self.random_state = random_state
def __iter__(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_train = permutation[:self.train_size]
ind_test = permutation[self.train_size:self.train_size
+ self.test_size]
# bootstrap in each split individually
train = rng.randint(0, self.train_size,
size=(self.train_size,))
test = rng.randint(0, self.test_size,
size=(self.test_size,))
yield ind_train[train], ind_test[test]
def __repr__(self):
return ('%s(%d, n_iter=%d, train_size=%d, test_size=%d, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
self.train_size,
self.test_size,
self.random_state,
))
def __len__(self):
return self.n_iter
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
indices=None, random_state=None, n_iterations=None):
if indices is None:
indices = True
else:
warnings.warn("The indices parameter is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning)
self.n = n
self.n_iter = n_iter
if n_iterations is not None: # pragma: no cover
warnings.warn("n_iterations was renamed to n_iter for consistency "
" and will be removed in 0.16.")
self.n_iter = n_iterations
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self._indices = indices
self.n_train, self.n_test = _validate_shuffle_split(n,
test_size,
train_size)
@property
def indices(self):
warnings.warn("The indices attribute is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
return self._indices
def __iter__(self):
if self._indices:
for train, test in self._iter_indices():
yield train, test
return
for train, test in self._iter_indices():
train_m = np.zeros(self.n, dtype=bool)
test_m = np.zeros(self.n, dtype=bool)
train_m[train] = True
test_m[test] = True
yield train_m, test_m
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
See also
--------
Bootstrap: cross-validation using re-sampling with replacement.
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
indices=None, random_state=None, n_iterations=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, indices, random_state,
n_iterations)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(np.bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = np.bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# dividers of the number of elements per class), we may end
# up here with less samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
# We complete by affecting randomly the missing indexes
missing_idx = np.where(np.bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
##############################################################################
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
This generator must include all elements in the test set exactly once.
Otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
p = np.concatenate([p for p, _ in preds_blocks])
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, X.shape[0]):
raise ValueError('cross_val_predict only works for partitions')
preds = p.copy()
preds[locs] = p
return preds
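# Usage sketch (illustrative only): the regressor and the tiny synthetic
# dataset below are arbitrary choices, not requirements of cross_val_predict.
def _demo_cross_val_predict():  # pragma: no cover - usage sketch
    from sklearn.linear_model import LinearRegression
    X_demo = np.arange(20.).reshape(10, 2)
    y_demo = X_demo.sum(axis=1)
    # each sample is predicted exactly once, by a model that never saw it
    return cross_val_predict(LinearRegression(), X_demo, y_demo, cv=5)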
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
n_samples = _num_samples(X)
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, np.asarray(v)[train]
if hasattr(v, '__len__') and len(v) == n_samples else v)
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
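# Usage sketch (illustrative only): [2, 0, 1] reorders arange(3) and is a
# partition, while [0, 0, 1] misses index 2 and is not.
def _demo_check_is_partition():  # pragma: no cover - usage sketch
    assert _check_is_partition(np.array([2, 0, 1]), 3)
    assert not _check_is_partition(np.array([0, 0, 1]), 3)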
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
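# Usage sketch (illustrative only): 3-fold stratified accuracy of a linear
# SVM on the bundled iris data; the estimator and dataset are arbitrary.
def _demo_cross_val_score():  # pragma: no cover - usage sketch
    from sklearn import datasets, svm
    iris = datasets.load_iris()
    # returns one accuracy value per fold, i.e. an array of shape (3,)
    return cross_val_score(svm.SVC(kernel='linear'), iris.data, iris.target,
                           cv=3)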
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
n_samples = _num_samples(X)
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, (v.tocsr()[train] if sp.issparse(v)
else np.asarray(v)[train])
if _is_arraylike(v) and _num_samples(v) == n_samples
else v)
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
        None, in which case 3 folds are used, or another object that
        will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
return _check_cv(cv, X=X, y=y, classifier=classifier, warn_mask=True)
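# Usage sketch (illustrative only): an integer plus a classification target
# yields a StratifiedKFold; the toy labels below are arbitrary.
def _demo_check_cv():  # pragma: no cover - usage sketch
    y_demo = np.array([0, 0, 1, 1, 0, 1])
    cv = check_cv(3, X=np.zeros((6, 2)), y=y_demo, classifier=True)
    assert isinstance(cv, StratifiedKFold)
    return cv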
def _check_cv(cv, X=None, y=None, classifier=False, warn_mask=False):
# This exists for internal use while indices is being deprecated.
is_sparse = sp.issparse(X)
needs_indices = is_sparse or not hasattr(X, "shape")
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if warn_mask and not needs_indices:
warnings.warn('check_cv will return indices instead of boolean '
'masks from 0.17', DeprecationWarning)
else:
needs_indices = None
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv, indices=needs_indices)
else:
cv = KFold(_num_samples(y), cv, indices=needs_indices)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv, indices=needs_indices)
if needs_indices and not getattr(cv, "_indices", True):
raise ValueError("Sparse data and lists require indices-based cross"
" validation generator, got: %r", cv)
return cv
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
a same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutations.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
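# Usage sketch (illustrative only): test whether a linear SVM's iris accuracy
# beats chance; dataset, estimator and permutation count are arbitrary.
def _demo_permutation_test_score():  # pragma: no cover - usage sketch
    from sklearn import datasets, svm
    iris = datasets.load_iris()
    score, perm_scores, pvalue = permutation_test_score(
        svm.SVC(kernel='linear'), iris.data, iris.target,
        cv=5, n_permutations=30, random_state=0)
    # a small pvalue means the unpermuted score is unlikely under chance
    return score, perm_scores, pvalue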
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> a, b = np.arange(10).reshape((5, 2)), range(5)
>>> a
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(b)
[0, 1, 2, 3, 4]
>>> a_train, a_test, b_train, b_test = train_test_split(
... a, b, test_size=0.33, random_state=42)
...
>>> a_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> b_train
[2, 0, 3]
>>> a_test
array([[2, 3],
[8, 9]])
>>> b_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.")
force_arrays = options.pop('force_arrays', False)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if force_arrays:
warnings.warn("The force_arrays option is deprecated and will be "
"removed in 0.18.", DeprecationWarning)
arrays = [check_array(x, 'csr', ensure_2d=False,
force_all_finite=False) if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
| bsd-3-clause |
lorenzo-desantis/mne-python | mne/viz/epochs.py | 2 | 60756 | """Functions to plot epochs data
"""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Jaakko Leppakangas <[email protected]>
#
# License: Simplified BSD
from functools import partial
import copy
import numpy as np
from ..utils import verbose, get_config, set_config, deprecated
from ..utils import logger
from ..io.pick import pick_types, channel_type
from ..io.proj import setup_proj
from ..fixes import Counter, _in1d
from ..time_frequency import compute_epochs_psd
from .utils import tight_layout, figure_nobar, _toggle_proj
from .utils import _toggle_options, _layout_figure, _setup_vmin_vmax
from .utils import _channels_changed, _plot_raw_onscroll, _onclick_help
from ..defaults import _handle_default
def plot_epochs_image(epochs, picks=None, sigma=0., vmin=None,
vmax=None, colorbar=True, order=None, show=True,
units=None, scalings=None, cmap='RdBu_r', fig=None):
"""Plot Event Related Potential / Fields image
Parameters
----------
epochs : instance of Epochs
The epochs
picks : int | array-like of int | None
The indices of the channels to consider. If None, all good
data channels are plotted.
sigma : float
        The standard deviation of the Gaussian smoothing to apply along
        the epoch axis of the image. If 0., no smoothing is applied.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers
colorbar : bool
        Whether to display a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
        of the image. If it's an array of int, it should have the same length
        as the number of good epochs. If it's a callable, the arguments
        passed are the times vector and the data as a 2d array
        (data.shape[1] == len(times)).
show : bool
Show figure if True.
units : dict | None
        The units of the channel types used for axes labels. If None,
defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting.
If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15,
eog=1e6)`
cmap : matplotlib colormap
Colormap.
fig : matplotlib figure | None
Figure instance to draw the image to. Figure must contain two axes for
drawing the single trials and evoked responses. If None a new figure is
created. Defaults to None.
Returns
-------
figs : the list of matplotlib figures
One figure per channel displayed
"""
from scipy import ndimage
units = _handle_default('units', units)
scalings = _handle_default('scalings', scalings)
import matplotlib.pyplot as plt
if picks is None:
picks = pick_types(epochs.info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
if set(units.keys()) != set(scalings.keys()):
raise ValueError('Scalings and units must have the same keys.')
picks = np.atleast_1d(picks)
if fig is not None and len(picks) > 1:
raise ValueError('Only single pick can be drawn to a figure.')
evoked = epochs.average(picks)
data = epochs.get_data()[:, picks, :]
    scale_vmin = vmin is None
    scale_vmax = vmax is None
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
figs = list()
for i, (this_data, idx) in enumerate(zip(np.swapaxes(data, 0, 1), picks)):
if fig is None:
this_fig = plt.figure()
else:
this_fig = fig
figs.append(this_fig)
ch_type = channel_type(epochs.info, idx)
if ch_type not in scalings:
# We know it's not in either scalings or units since keys match
raise KeyError('%s type not in scalings and units' % ch_type)
this_data *= scalings[ch_type]
this_order = order
if callable(order):
this_order = order(epochs.times, this_data)
if this_order is not None:
this_data = this_data[this_order]
if sigma > 0.:
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma,
axis=0)
plt.figure(this_fig.number)
ax1 = plt.subplot2grid((3, 10), (0, 0), colspan=9, rowspan=2)
if scale_vmin:
vmin *= scalings[ch_type]
if scale_vmax:
vmax *= scalings[ch_type]
im = ax1.imshow(this_data,
extent=[1e3 * epochs.times[0], 1e3 * epochs.times[-1],
0, len(data)],
aspect='auto', origin='lower', interpolation='nearest',
vmin=vmin, vmax=vmax, cmap=cmap)
ax2 = plt.subplot2grid((3, 10), (2, 0), colspan=9, rowspan=1)
if colorbar:
ax3 = plt.subplot2grid((3, 10), (0, 9), colspan=1, rowspan=3)
ax1.set_title(epochs.ch_names[idx])
ax1.set_ylabel('Epochs')
ax1.axis('auto')
ax1.axis('tight')
ax1.axvline(0, color='m', linewidth=3, linestyle='--')
evoked_data = scalings[ch_type] * evoked.data[i]
ax2.plot(1e3 * evoked.times, evoked_data)
ax2.set_xlabel('Time (ms)')
ax2.set_xlim([1e3 * evoked.times[0], 1e3 * evoked.times[-1]])
ax2.set_ylabel(units[ch_type])
evoked_vmin = min(evoked_data) * 1.1 if scale_vmin else vmin
evoked_vmax = max(evoked_data) * 1.1 if scale_vmax else vmax
if scale_vmin or scale_vmax:
evoked_vmax = max(np.abs([evoked_vmax, evoked_vmin]))
evoked_vmin = -evoked_vmax
ax2.set_ylim([evoked_vmin, evoked_vmax])
ax2.axvline(0, color='m', linewidth=3, linestyle='--')
if colorbar:
plt.colorbar(im, cax=ax3)
tight_layout(fig=this_fig)
if show:
plt.show()
return figs
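# Illustrative usage sketch for plot_epochs_image (comments only, nothing is
# executed at import time; it assumes `epochs` is an existing mne.Epochs
# instance and that channel 0 is a data channel):
#
#     # order epochs by their mean amplitude via the callable form of `order`
#     figs = plot_epochs_image(epochs, picks=[0], sigma=0.3, colorbar=True,
#                              order=lambda times, data: np.argsort(data.mean(axis=1)))
#     # -> one matplotlib figure per picked channel (trials image + evoked trace)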
@deprecated('`plot_image_epochs` is deprecated and will be removed in '
            'MNE 0.11. Please use plot_epochs_image instead.')
def plot_image_epochs(epochs, picks=None, sigma=0., vmin=None,
                      vmax=None, colorbar=True, order=None, show=True,
                      units=None, scalings=None, cmap='RdBu_r', fig=None):
    return plot_epochs_image(epochs=epochs, picks=picks, sigma=sigma,
                             vmin=vmin, vmax=vmax, colorbar=colorbar,
                             order=order, show=show, units=units,
                             scalings=scalings, cmap=cmap, fig=fig)
def _drop_log_stats(drop_log, ignore=['IGNORED']):
"""
Parameters
----------
drop_log : list of lists
Epoch drop log from Epochs.drop_log.
ignore : list
The drop reasons to ignore.
Returns
-------
perc : float
Total percentage of epochs dropped.
"""
# XXX: This function should be moved to epochs.py after
# removal of perc return parameter in plot_drop_log()
if not isinstance(drop_log, list) or not isinstance(drop_log[0], list):
raise ValueError('drop_log must be a list of lists')
perc = 100 * np.mean([len(d) > 0 for d in drop_log
if not any(r in ignore for r in d)])
return perc
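# For example (hypothetical drop_log), a sketch of what _drop_log_stats yields:
#
#     drop_log = [[], ['MEG 2443'], ['IGNORED'], []]
#     _drop_log_stats(drop_log)
#     # -> 33.33..., i.e. 1 of the 3 non-ignored epochs was dropped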
def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown',
color=(0.9, 0.9, 0.9), width=0.8, ignore=['IGNORED'],
show=True):
"""Show the channel stats based on a drop_log from Epochs
Parameters
----------
drop_log : list of lists
Epoch drop log from Epochs.drop_log.
threshold : float
The percentage threshold to use to decide whether or not to
plot. Default is zero (always plot).
n_max_plot : int
Maximum number of channels to show stats for.
subject : str
The subject name to use in the title of the plot.
color : tuple | str
Color to use for the bars.
width : float
Width of the bars.
ignore : list
The drop reasons to ignore.
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
"""
import matplotlib.pyplot as plt
perc = _drop_log_stats(drop_log, ignore)
scores = Counter([ch for d in drop_log for ch in d if ch not in ignore])
ch_names = np.array(list(scores.keys()))
fig = plt.figure()
if perc < threshold or len(ch_names) == 0:
plt.text(0, 0, 'No drops')
return fig
counts = 100 * np.array(list(scores.values()), dtype=float) / len(drop_log)
n_plot = min(n_max_plot, len(ch_names))
order = np.flipud(np.argsort(counts))
plt.title('%s: %0.1f%%' % (subject, perc))
x = np.arange(n_plot)
plt.bar(x, counts[order[:n_plot]], color=color, width=width)
plt.xticks(x + width / 2.0, ch_names[order[:n_plot]], rotation=45,
horizontalalignment='right')
plt.tick_params(axis='x', which='major', labelsize=10)
plt.ylabel('% of epochs rejected')
plt.xlim((-width / 2.0, (n_plot - 1) + width * 3 / 2))
plt.grid(True, axis='y')
if show:
plt.show()
return fig
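# Illustrative usage sketch (assumes `epochs` is an mne.Epochs instance whose
# drop_log has been populated, e.g. after epoch rejection):
#
#     fig = plot_drop_log(epochs.drop_log, subject='sample', n_max_plot=10)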
def _draw_epochs_axes(epoch_idx, good_ch_idx, bad_ch_idx, data, times, axes,
title_str, axes_handler):
"""Aux functioin"""
this = axes_handler[0]
for ii, data_, ax in zip(epoch_idx, data, axes):
for l, d in zip(ax.lines, data_[good_ch_idx]):
l.set_data(times, d)
if bad_ch_idx is not None:
bad_lines = [ax.lines[k] for k in bad_ch_idx]
for l, d in zip(bad_lines, data_[bad_ch_idx]):
l.set_data(times, d)
if title_str is not None:
ax.set_title(title_str % ii, fontsize=12)
ax.set_ylim(data.min(), data.max())
ax.set_yticks(list())
ax.set_xticks(list())
if vars(ax)[this]['reject'] is True:
# memorizing reject
for l in ax.lines:
l.set_color((0.8, 0.8, 0.8))
ax.get_figure().canvas.draw()
else:
# forgetting previous reject
for k in axes_handler:
if k == this:
continue
if vars(ax).get(k, {}).get('reject', None) is True:
for l in ax.lines[:len(good_ch_idx)]:
l.set_color('k')
if bad_ch_idx is not None:
for l in ax.lines[-len(bad_ch_idx):]:
l.set_color('r')
ax.get_figure().canvas.draw()
break
def _epochs_navigation_onclick(event, params):
"""Aux function"""
import matplotlib.pyplot as plt
p = params
here = None
if event.inaxes == p['back'].ax:
here = 1
elif event.inaxes == p['next'].ax:
here = -1
elif event.inaxes == p['reject-quit'].ax:
if p['reject_idx']:
p['epochs'].drop_epochs(p['reject_idx'])
plt.close(p['fig'])
plt.close(event.inaxes.get_figure())
if here is not None:
p['idx_handler'].rotate(here)
p['axes_handler'].rotate(here)
this_idx = p['idx_handler'][0]
_draw_epochs_axes(this_idx, p['good_ch_idx'], p['bad_ch_idx'],
p['data'][this_idx],
p['times'], p['axes'], p['title_str'],
p['axes_handler'])
# XXX don't ask me why
p['axes'][0].get_figure().canvas.draw()
def _epochs_axes_onclick(event, params):
"""Aux function"""
reject_color = (0.8, 0.8, 0.8)
ax = event.inaxes
if event.inaxes is None:
return
p = params
here = vars(ax)[p['axes_handler'][0]]
if here.get('reject', None) is False:
idx = here['idx']
if idx not in p['reject_idx']:
p['reject_idx'].append(idx)
for l in ax.lines:
l.set_color(reject_color)
here['reject'] = True
elif here.get('reject', None) is True:
idx = here['idx']
if idx in p['reject_idx']:
p['reject_idx'].pop(p['reject_idx'].index(idx))
good_lines = [ax.lines[k] for k in p['good_ch_idx']]
for l in good_lines:
l.set_color('k')
if p['bad_ch_idx'] is not None:
bad_lines = ax.lines[-len(p['bad_ch_idx']):]
for l in bad_lines:
l.set_color('r')
here['reject'] = False
ax.get_figure().canvas.draw()
def plot_epochs(epochs, picks=None, scalings=None, n_epochs=20,
n_channels=20, title=None, show=True, block=False):
""" Visualize epochs
Bad epochs can be marked with a left click on top of the epoch. Bad
channels can be selected by clicking the channel name on the left side of
the main axes. Calling this function drops all the selected bad epochs as
well as bad epochs marked beforehand with rejection parameters.
Parameters
----------
epochs : instance of Epochs
The epochs object
picks : array-like of int | None
Channels to be included. If None only good data channels are used.
Defaults to None
scalings : dict | None
Scale factors for the traces. If None, defaults to::
dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)
n_epochs : int
The number of epochs per view. Defaults to 20.
n_channels : int
The number of channels per view. Defaults to 20.
title : str | None
The title of the window. If None, epochs name will be displayed.
Defaults to None.
show : bool
Show figure if True. Defaults to True
block : bool
Whether to halt program execution until the figure is closed.
Useful for rejecting bad trials on the fly by clicking on an epoch.
Defaults to False.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
Notes
-----
    The arrow keys (up/down/left/right) can be used to navigate between
    channels and epochs, and the scaling can be adjusted with the - and +
    (or =) keys, but this depends on the backend matplotlib is configured
    to use (e.g., mpl.use(``TkAgg``) should work). Full screen mode can be
    toggled with the f11 key. The number of epochs and channels per view
    can be adjusted with the home/end and page down/page up keys. The
    butterfly plot can be toggled with the ``b`` key. A right mouse click
    adds a vertical line to the plot.
"""
import matplotlib.pyplot as plt
scalings = _handle_default('scalings_plot_raw', scalings)
projs = epochs.info['projs']
params = {'epochs': epochs,
'orig_data': np.concatenate(epochs.get_data(), axis=1),
'info': copy.deepcopy(epochs.info),
'bad_color': (0.8, 0.8, 0.8),
't_start': 0}
params['label_click_fun'] = partial(_pick_bad_channels, params=params)
_prepare_mne_browse_epochs(params, projs, n_channels, n_epochs, scalings,
title, picks)
callback_close = partial(_close_event, params=params)
params['fig'].canvas.mpl_connect('close_event', callback_close)
if show:
try:
plt.show(block=block)
except TypeError: # not all versions have this
plt.show()
return params['fig']
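# Illustrative usage sketch (assumes `epochs` is an mne.Epochs instance; with
# block=True the call blocks until the browser window is closed, at which point
# the epochs marked as bad are dropped):
#
#     fig = plot_epochs(epochs, n_epochs=10, n_channels=15, block=True)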
@verbose
def plot_epochs_psd(epochs, fmin=0, fmax=np.inf, proj=False, n_fft=256,
picks=None, ax=None, color='black', area_mode='std',
area_alpha=0.33, n_overlap=0,
dB=True, n_jobs=1, show=True, verbose=None):
"""Plot the power spectral density across epochs
Parameters
----------
epochs : instance of Epochs
The epochs object
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
proj : bool
Apply projection.
n_fft : int
Number of points to use in Welch FFT calculations.
picks : array-like of int | None
List of channels to use.
ax : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
color : str | tuple
A matplotlib-compatible color to use.
area_mode : str | None
Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
will be plotted. If 'range', the min and max (across channels) will be
plotted. Bad channels will be excluded from these calculations.
If None, no area will be plotted.
area_alpha : float
Alpha for the area.
n_overlap : int
The number of points of overlap between blocks.
dB : bool
If True, transform data to decibels.
n_jobs : int
Number of jobs to run in parallel.
show : bool
Show figure if True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of matplotlib figure
        Figure with the power spectral density, one subplot per channel type.
"""
import matplotlib.pyplot as plt
from .raw import _set_psd_plot_params
fig, picks_list, titles_list, ax_list, make_label = _set_psd_plot_params(
epochs.info, proj, picks, ax, area_mode)
for ii, (picks, title, ax) in enumerate(zip(picks_list, titles_list,
ax_list)):
psds, freqs = compute_epochs_psd(epochs, picks=picks, fmin=fmin,
fmax=fmax, n_fft=n_fft,
n_overlap=n_overlap, proj=proj,
n_jobs=n_jobs)
# Convert PSDs to dB
if dB:
psds = 10 * np.log10(psds)
unit = 'dB'
else:
unit = 'power'
# mean across epochs and channels
psd_mean = np.mean(psds, axis=0).mean(axis=0)
if area_mode == 'std':
# std across channels
psd_std = np.std(np.mean(psds, axis=0), axis=0)
hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
elif area_mode == 'range':
hyp_limits = (np.min(np.mean(psds, axis=0), axis=0),
np.max(np.mean(psds, axis=0), axis=0))
else: # area_mode is None
hyp_limits = None
ax.plot(freqs, psd_mean, color=color)
if hyp_limits is not None:
ax.fill_between(freqs, hyp_limits[0], y2=hyp_limits[1],
color=color, alpha=area_alpha)
if make_label:
if ii == len(picks_list) - 1:
ax.set_xlabel('Freq (Hz)')
if ii == len(picks_list) // 2:
ax.set_ylabel('Power Spectral Density (%s/Hz)' % unit)
ax.set_title(title)
ax.set_xlim(freqs[0], freqs[-1])
if make_label:
tight_layout(pad=0.1, h_pad=0.1, w_pad=0.1, fig=fig)
if show:
plt.show()
return fig
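# Illustrative usage sketch (assumes `epochs` is an mne.Epochs instance; the
# frequency band and FFT length below are arbitrary example values):
#
#     fig = plot_epochs_psd(epochs, fmin=2., fmax=40., n_fft=256, dB=True)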
def _prepare_mne_browse_epochs(params, projs, n_channels, n_epochs, scalings,
title, picks, order=None):
"""Helper for setting up the mne_browse_epochs window."""
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.collections import LineCollection
from matplotlib.colors import colorConverter
epochs = params['epochs']
if picks is None:
picks = _handle_picks(epochs)
if len(picks) < 1:
raise RuntimeError('No appropriate channels found. Please'
' check your picks')
picks = sorted(picks)
# Reorganize channels
inds = list()
types = list()
for t in ['grad', 'mag']:
idxs = pick_types(params['info'], meg=t, ref_meg=False, exclude=[])
if len(idxs) < 1:
continue
mask = _in1d(idxs, picks, assume_unique=True)
inds.append(idxs[mask])
types += [t] * len(inds[-1])
pick_kwargs = dict(meg=False, ref_meg=False, exclude=[])
if order is None:
order = ['eeg', 'eog', 'ecg', 'emg', 'ref_meg', 'stim', 'resp', 'misc',
'chpi', 'syst', 'ias', 'exci']
for ch_type in order:
pick_kwargs[ch_type] = True
idxs = pick_types(params['info'], **pick_kwargs)
if len(idxs) < 1:
continue
mask = _in1d(idxs, picks, assume_unique=True)
inds.append(idxs[mask])
types += [ch_type] * len(inds[-1])
pick_kwargs[ch_type] = False
inds = np.concatenate(inds).astype(int)
if not len(inds) == len(picks):
raise RuntimeError('Some channels not classified. Please'
' check your picks')
ch_names = [params['info']['ch_names'][x] for x in inds]
# set up plotting
size = get_config('MNE_BROWSE_RAW_SIZE')
n_epochs = min(n_epochs, len(epochs.events))
duration = len(epochs.times) * n_epochs
n_channels = min(n_channels, len(picks))
if size is not None:
size = size.split(',')
size = tuple(float(s) for s in size)
if title is None:
title = epochs.name
if epochs.name is None or len(title) == 0:
title = ''
fig = figure_nobar(facecolor='w', figsize=size, dpi=80)
fig.canvas.set_window_title('mne_browse_epochs')
ax = plt.subplot2grid((10, 15), (0, 1), colspan=13, rowspan=9)
ax.annotate(title, xy=(0.5, 1), xytext=(0, ax.get_ylim()[1] + 15),
ha='center', va='bottom', size=12, xycoords='axes fraction',
textcoords='offset points')
color = _handle_default('color', None)
ax.axis([0, duration, 0, 200])
ax2 = ax.twiny()
ax2.set_zorder(-1)
ax2.axis([0, duration, 0, 200])
ax_hscroll = plt.subplot2grid((10, 15), (9, 1), colspan=13)
ax_hscroll.get_yaxis().set_visible(False)
ax_hscroll.set_xlabel('Epochs')
ax_vscroll = plt.subplot2grid((10, 15), (0, 14), rowspan=9)
ax_vscroll.set_axis_off()
ax_vscroll.add_patch(mpl.patches.Rectangle((0, 0), 1, len(picks),
facecolor='w', zorder=2))
ax_help_button = plt.subplot2grid((10, 15), (9, 0), colspan=1)
help_button = mpl.widgets.Button(ax_help_button, 'Help')
help_button.on_clicked(partial(_onclick_help, params=params))
# populate vertical and horizontal scrollbars
for ci in range(len(picks)):
if ch_names[ci] in params['info']['bads']:
this_color = params['bad_color']
else:
this_color = color[types[ci]]
ax_vscroll.add_patch(mpl.patches.Rectangle((0, ci), 1, 1,
facecolor=this_color,
edgecolor=this_color,
zorder=3))
vsel_patch = mpl.patches.Rectangle((0, 0), 1, n_channels, alpha=0.5,
edgecolor='w', facecolor='w', zorder=4)
ax_vscroll.add_patch(vsel_patch)
ax_vscroll.set_ylim(len(types), 0)
ax_vscroll.set_title('Ch.')
# populate colors list
type_colors = [colorConverter.to_rgba(color[c]) for c in types]
colors = list()
for color_idx in range(len(type_colors)):
colors.append([type_colors[color_idx]] * len(epochs.events))
lines = list()
n_times = len(epochs.times)
for ch_idx in range(n_channels):
if len(colors) - 1 < ch_idx:
break
lc = LineCollection(list(), antialiased=False, linewidths=0.5,
zorder=2, picker=3.)
ax.add_collection(lc)
lines.append(lc)
times = epochs.times
data = np.zeros((params['info']['nchan'], len(times) * len(epochs.events)))
ylim = (25., 0.) # Hardcoded 25 because butterfly has max 5 rows (5*5=25).
# make shells for plotting traces
offset = ylim[0] / n_channels
offsets = np.arange(n_channels) * offset + (offset / 2.)
times = np.arange(len(data[0]))
epoch_times = np.arange(0, len(times), n_times)
ax.set_yticks(offsets)
ax.set_ylim(ylim)
ticks = epoch_times + 0.5 * n_times
ax.set_xticks(ticks)
ax2.set_xticks(ticks[:n_epochs])
labels = list(range(1, len(ticks) + 1)) # epoch numbers
ax.set_xticklabels(labels)
ax2.set_xticklabels(labels)
xlim = epoch_times[-1] + len(epochs.times)
ax_hscroll.set_xlim(0, xlim)
vertline_t = ax_hscroll.text(0, 1, '', color='y', va='bottom', ha='right')
# fit horizontal scroll bar ticks
hscroll_ticks = np.arange(0, xlim, xlim / 7.0)
hscroll_ticks = np.append(hscroll_ticks, epoch_times[-1])
hticks = list()
for tick in hscroll_ticks:
hticks.append(epoch_times.flat[np.abs(epoch_times - tick).argmin()])
hlabels = [x / n_times + 1 for x in hticks]
ax_hscroll.set_xticks(hticks)
ax_hscroll.set_xticklabels(hlabels)
for epoch_idx in range(len(epoch_times)):
ax_hscroll.add_patch(mpl.patches.Rectangle((epoch_idx * n_times, 0),
n_times, 1, facecolor='w',
edgecolor='w', alpha=0.6))
hsel_patch = mpl.patches.Rectangle((0, 0), duration, 1,
edgecolor='k',
facecolor=(0.75, 0.75, 0.75),
alpha=0.25, linewidth=1, clip_on=False)
ax_hscroll.add_patch(hsel_patch)
text = ax.text(0, 0, 'blank', zorder=2, verticalalignment='baseline',
ha='left', fontweight='bold')
text.set_visible(False)
params.update({'fig': fig,
'ax': ax,
'ax2': ax2,
'ax_hscroll': ax_hscroll,
'ax_vscroll': ax_vscroll,
'vsel_patch': vsel_patch,
'hsel_patch': hsel_patch,
'lines': lines,
'projs': projs,
'ch_names': ch_names,
'n_channels': n_channels,
'n_epochs': n_epochs,
'scalings': scalings,
'duration': duration,
'ch_start': 0,
'colors': colors,
'def_colors': type_colors, # don't change at runtime
'picks': picks,
'bads': np.array(list(), dtype=int),
'data': data,
'times': times,
'epoch_times': epoch_times,
'offsets': offsets,
'labels': labels,
'scale_factor': 1.0,
'butterfly_scale': 1.0,
'fig_proj': None,
'types': np.array(types),
'inds': inds,
'vert_lines': list(),
'vertline_t': vertline_t,
'butterfly': False,
'text': text,
'ax_help_button': ax_help_button, # needed for positioning
'help_button': help_button, # reference needed for clicks
'fig_options': None,
'settings': [True, True, True, True],
'image_plot': None})
params['plot_fun'] = partial(_plot_traces, params=params)
if len(projs) > 0 and not epochs.proj:
ax_button = plt.subplot2grid((10, 15), (9, 14))
opt_button = mpl.widgets.Button(ax_button, 'Proj')
callback_option = partial(_toggle_options, params=params)
opt_button.on_clicked(callback_option)
params['opt_button'] = opt_button
params['ax_button'] = ax_button
# callbacks
callback_scroll = partial(_plot_onscroll, params=params)
fig.canvas.mpl_connect('scroll_event', callback_scroll)
callback_click = partial(_mouse_click, params=params)
fig.canvas.mpl_connect('button_press_event', callback_click)
callback_key = partial(_plot_onkey, params=params)
fig.canvas.mpl_connect('key_press_event', callback_key)
callback_resize = partial(_resize_event, params=params)
fig.canvas.mpl_connect('resize_event', callback_resize)
fig.canvas.mpl_connect('pick_event', partial(_onpick, params=params))
# Draw event lines for the first time.
_plot_vert_lines(params)
# As here code is shared with plot_evoked, some extra steps:
# first the actual plot update function
params['plot_update_proj_callback'] = _plot_update_epochs_proj
# then the toggle handler
callback_proj = partial(_toggle_proj, params=params)
# store these for use by callbacks in the options figure
params['callback_proj'] = callback_proj
params['callback_key'] = callback_key
callback_proj('none')
_layout_figure(params)
def _plot_traces(params):
""" Helper for plotting concatenated epochs """
params['text'].set_visible(False)
ax = params['ax']
butterfly = params['butterfly']
if butterfly:
ch_start = 0
n_channels = len(params['picks'])
data = params['data'] * params['butterfly_scale']
else:
ch_start = params['ch_start']
n_channels = params['n_channels']
data = params['data'] * params['scale_factor']
offsets = params['offsets']
lines = params['lines']
epochs = params['epochs']
n_times = len(epochs.times)
tick_list = list()
start_idx = int(params['t_start'] / n_times)
end = params['t_start'] + params['duration']
end_idx = int(end / n_times)
xlabels = params['labels'][start_idx:]
event_ids = params['epochs'].events[:, 2]
params['ax2'].set_xticklabels(event_ids[start_idx:])
ax.set_xticklabels(xlabels)
ylabels = ax.yaxis.get_ticklabels()
# do the plotting
for line_idx in range(n_channels):
ch_idx = line_idx + ch_start
if line_idx >= len(lines):
break
elif ch_idx < len(params['ch_names']):
if butterfly:
ch_type = params['types'][ch_idx]
if ch_type == 'grad':
offset = offsets[0]
elif ch_type == 'mag':
offset = offsets[1]
elif ch_type == 'eeg':
offset = offsets[2]
elif ch_type == 'eog':
offset = offsets[3]
elif ch_type == 'ecg':
offset = offsets[4]
else:
lines[line_idx].set_segments(list())
else:
tick_list += [params['ch_names'][ch_idx]]
offset = offsets[line_idx]
this_data = data[ch_idx][params['t_start']:end]
# subtraction here gets correct orientation for flipped ylim
ydata = offset - this_data
xdata = params['times'][:params['duration']]
num_epochs = np.min([params['n_epochs'],
len(epochs.events)])
segments = np.split(np.array((xdata, ydata)).T, num_epochs)
ch_name = params['ch_names'][ch_idx]
if ch_name in params['info']['bads']:
if not butterfly:
this_color = params['bad_color']
ylabels[line_idx].set_color(this_color)
this_color = np.tile((params['bad_color']), (num_epochs, 1))
for bad_idx in params['bads']:
if bad_idx < start_idx or bad_idx > end_idx:
continue
this_color[bad_idx - start_idx] = (1., 0., 0.)
lines[line_idx].set_zorder(1)
else:
this_color = params['colors'][ch_idx][start_idx:end_idx]
lines[line_idx].set_zorder(2)
if not butterfly:
ylabels[line_idx].set_color('black')
lines[line_idx].set_segments(segments)
lines[line_idx].set_color(this_color)
else:
lines[line_idx].set_segments(list())
# finalize plot
ax.set_xlim(params['times'][0], params['times'][0] + params['duration'],
False)
params['ax2'].set_xlim(params['times'][0],
params['times'][0] + params['duration'], False)
if butterfly:
factor = -1. / params['butterfly_scale']
labels = np.empty(20, dtype='S15')
labels.fill('')
ticks = ax.get_yticks()
idx_offset = 1
if 'grad' in params['types']:
labels[idx_offset + 1] = '0.00'
for idx in [idx_offset, idx_offset + 2]:
labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[0]) *
params['scalings']['grad'] *
1e13 * factor)
idx_offset += 4
if 'mag' in params['types']:
labels[idx_offset + 1] = '0.00'
for idx in [idx_offset, idx_offset + 2]:
labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[1]) *
params['scalings']['mag'] *
1e15 * factor)
idx_offset += 4
if 'eeg' in params['types']:
labels[idx_offset + 1] = '0.00'
for idx in [idx_offset, idx_offset + 2]:
labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[2]) *
params['scalings']['eeg'] *
1e6 * factor)
idx_offset += 4
if 'eog' in params['types']:
labels[idx_offset + 1] = '0.00'
for idx in [idx_offset, idx_offset + 2]:
labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[3]) *
params['scalings']['eog'] *
1e6 * factor)
idx_offset += 4
if 'ecg' in params['types']:
labels[idx_offset + 1] = '0.00'
for idx in [idx_offset, idx_offset + 2]:
labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[4]) *
params['scalings']['ecg'] *
1e6 * factor)
ax.set_yticklabels(labels, fontsize=12, color='black')
else:
ax.set_yticklabels(tick_list, fontsize=12)
params['vsel_patch'].set_y(ch_start)
params['fig'].canvas.draw()
# XXX This is a hack to make sure this figure gets drawn last
# so that when matplotlib goes to calculate bounds we don't get a
# CGContextRef error on the MacOSX backend :(
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
def _plot_update_epochs_proj(params, bools):
"""Helper only needs to be called when proj is changed"""
if bools is not None:
inds = np.where(bools)[0]
params['info']['projs'] = [copy.deepcopy(params['projs'][ii])
for ii in inds]
params['proj_bools'] = bools
params['projector'], _ = setup_proj(params['info'], add_eeg_ref=False,
verbose=False)
data = params['orig_data']
if params['projector'] is not None:
data = np.dot(params['projector'], data)
types = params['types']
for pick, ind in enumerate(params['inds']):
params['data'][pick] = data[ind] / params['scalings'][types[pick]]
params['plot_fun']()
def _handle_picks(epochs):
"""Aux function to handle picks."""
if any('ICA' in k for k in epochs.ch_names):
picks = pick_types(epochs.info, misc=True, ref_meg=False,
exclude=[])
else:
picks = pick_types(epochs.info, meg=True, eeg=True, eog=True, ecg=True,
ref_meg=False, exclude=[])
return picks
def _plot_window(value, params):
"""Deal with horizontal shift of the viewport."""
max_times = len(params['times']) - params['duration']
if value > max_times:
value = len(params['times']) - params['duration']
if value < 0:
value = 0
if params['t_start'] != value:
params['t_start'] = value
params['hsel_patch'].set_x(value)
params['plot_fun']()
def _plot_vert_lines(params):
""" Helper function for plotting vertical lines."""
ax = params['ax']
while len(ax.lines) > 0:
ax.lines.pop()
params['vert_lines'] = list()
params['vertline_t'].set_text('')
epochs = params['epochs']
if params['settings'][3]: # if zeroline visible
t_zero = np.where(epochs.times == 0.)[0]
if len(t_zero) == 1:
for event_idx in range(len(epochs.events)):
pos = [event_idx * len(epochs.times) + t_zero[0],
event_idx * len(epochs.times) + t_zero[0]]
ax.plot(pos, ax.get_ylim(), 'g', zorder=3, alpha=0.4)
for epoch_idx in range(len(epochs.events)):
pos = [epoch_idx * len(epochs.times), epoch_idx * len(epochs.times)]
ax.plot(pos, ax.get_ylim(), color='black', linestyle='--', zorder=1)
def _pick_bad_epochs(event, params):
"""Helper for selecting / dropping bad epochs"""
if 'ica' in params:
pos = (event.xdata, event.ydata)
_pick_bad_channels(pos, params)
return
n_times = len(params['epochs'].times)
start_idx = int(params['t_start'] / n_times)
xdata = event.xdata
xlim = event.inaxes.get_xlim()
epoch_idx = start_idx + int(xdata / (xlim[1] / params['n_epochs']))
total_epochs = len(params['epochs'].events)
if epoch_idx > total_epochs - 1:
return
# remove bad epoch
if epoch_idx in params['bads']:
params['bads'] = params['bads'][(params['bads'] != epoch_idx)]
for ch_idx in range(len(params['ch_names'])):
params['colors'][ch_idx][epoch_idx] = params['def_colors'][ch_idx]
params['ax_hscroll'].patches[epoch_idx].set_color('w')
params['ax_hscroll'].patches[epoch_idx].set_zorder(1)
params['plot_fun']()
return
# add bad epoch
params['bads'] = np.append(params['bads'], epoch_idx)
params['ax_hscroll'].patches[epoch_idx].set_color((1., 0., 0., 1.))
params['ax_hscroll'].patches[epoch_idx].set_zorder(2)
params['ax_hscroll'].patches[epoch_idx].set_edgecolor('w')
for ch_idx in range(len(params['ch_names'])):
params['colors'][ch_idx][epoch_idx] = (1., 0., 0., 1.)
params['plot_fun']()
def _pick_bad_channels(pos, params):
"""Helper function for selecting bad channels."""
text, ch_idx = _label2idx(params, pos)
if text is None:
return
if text in params['info']['bads']:
while text in params['info']['bads']:
params['info']['bads'].remove(text)
color = params['def_colors'][ch_idx]
params['ax_vscroll'].patches[ch_idx + 1].set_color(color)
else:
params['info']['bads'].append(text)
color = params['bad_color']
params['ax_vscroll'].patches[ch_idx + 1].set_color(color)
if 'ica' in params:
params['plot_fun']()
else:
params['plot_update_proj_callback'](params, None)
def _plot_onscroll(event, params):
"""Function to handle scroll events."""
if event.key == 'control':
if event.step < 0:
event.key = '-'
else:
event.key = '+'
_plot_onkey(event, params)
return
if params['butterfly']:
return
_plot_raw_onscroll(event, params, len(params['ch_names']))
def _mouse_click(event, params):
"""Function to handle mouse click events."""
if event.inaxes is None:
if params['butterfly'] or not params['settings'][0]:
return
ax = params['ax']
ylim = ax.get_ylim()
pos = ax.transData.inverted().transform((event.x, event.y))
if pos[0] > 0 or pos[1] < 0 or pos[1] > ylim[0]:
return
if event.button == 1: # left click
params['label_click_fun'](pos)
elif event.button == 3: # right click
if 'ica' not in params:
_, ch_idx = _label2idx(params, pos)
if ch_idx is None:
return
if channel_type(params['info'], ch_idx) not in ['mag', 'grad',
'eeg', 'eog']:
logger.info('Event related fields / potentials only '
'available for MEG and EEG channels.')
return
fig = plot_epochs_image(params['epochs'],
picks=params['inds'][ch_idx],
fig=params['image_plot'])[0]
params['image_plot'] = fig
elif event.button == 1: # left click
# vertical scroll bar changed
if event.inaxes == params['ax_vscroll']:
if params['butterfly']:
return
ch_start = max(int(event.ydata) - params['n_channels'] // 2, 0)
if params['ch_start'] != ch_start:
params['ch_start'] = ch_start
params['plot_fun']()
# horizontal scroll bar changed
elif event.inaxes == params['ax_hscroll']:
# find the closest epoch time
times = params['epoch_times']
offset = 0.5 * params['n_epochs'] * len(params['epochs'].times)
xdata = times.flat[np.abs(times - (event.xdata - offset)).argmin()]
_plot_window(xdata, params)
# main axes
elif event.inaxes == params['ax']:
_pick_bad_epochs(event, params)
elif event.inaxes == params['ax'] and event.button == 2: # middle click
params['fig'].canvas.draw()
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
elif event.inaxes == params['ax'] and event.button == 3: # right click
n_times = len(params['epochs'].times)
xdata = int(event.xdata % n_times)
prev_xdata = 0
if len(params['vert_lines']) > 0:
prev_xdata = params['vert_lines'][0][0].get_data()[0][0]
while len(params['vert_lines']) > 0:
params['ax'].lines.remove(params['vert_lines'][0][0])
params['vert_lines'].pop(0)
if prev_xdata == xdata: # lines removed
params['vertline_t'].set_text('')
params['plot_fun']()
return
ylim = params['ax'].get_ylim()
for epoch_idx in range(params['n_epochs']): # plot lines
pos = [epoch_idx * n_times + xdata, epoch_idx * n_times + xdata]
params['vert_lines'].append(params['ax'].plot(pos, ylim, 'y',
zorder=4))
params['vertline_t'].set_text('%0.3f' % params['epochs'].times[xdata])
params['plot_fun']()
def _plot_onkey(event, params):
"""Function to handle key presses."""
import matplotlib.pyplot as plt
if event.key == 'down':
if params['butterfly']:
return
params['ch_start'] += params['n_channels']
_channels_changed(params, len(params['ch_names']))
elif event.key == 'up':
if params['butterfly']:
return
params['ch_start'] -= params['n_channels']
_channels_changed(params, len(params['ch_names']))
elif event.key == 'left':
sample = params['t_start'] - params['duration']
sample = np.max([0, sample])
_plot_window(sample, params)
elif event.key == 'right':
sample = params['t_start'] + params['duration']
sample = np.min([sample, params['times'][-1] - params['duration']])
times = params['epoch_times']
xdata = times.flat[np.abs(times - sample).argmin()]
_plot_window(xdata, params)
elif event.key == '-':
if params['butterfly']:
params['butterfly_scale'] /= 1.1
else:
params['scale_factor'] /= 1.1
params['plot_fun']()
elif event.key in ['+', '=']:
if params['butterfly']:
params['butterfly_scale'] *= 1.1
else:
params['scale_factor'] *= 1.1
params['plot_fun']()
elif event.key == 'f11':
mng = plt.get_current_fig_manager()
mng.full_screen_toggle()
elif event.key == 'pagedown':
if params['n_channels'] == 1 or params['butterfly']:
return
n_channels = params['n_channels'] - 1
ylim = params['ax'].get_ylim()
offset = ylim[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
params['n_channels'] = n_channels
params['ax'].collections.pop()
params['ax'].set_yticks(params['offsets'])
params['lines'].pop()
params['vsel_patch'].set_height(n_channels)
params['plot_fun']()
elif event.key == 'pageup':
if params['butterfly']:
return
from matplotlib.collections import LineCollection
n_channels = params['n_channels'] + 1
ylim = params['ax'].get_ylim()
offset = ylim[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
params['n_channels'] = n_channels
lc = LineCollection(list(), antialiased=False, linewidths=0.5,
zorder=2, picker=3.)
params['ax'].add_collection(lc)
params['ax'].set_yticks(params['offsets'])
params['lines'].append(lc)
params['vsel_patch'].set_height(n_channels)
params['plot_fun']()
elif event.key == 'home':
n_epochs = params['n_epochs'] - 1
if n_epochs <= 0:
return
n_times = len(params['epochs'].times)
ticks = params['epoch_times'] + 0.5 * n_times
params['ax2'].set_xticks(ticks[:n_epochs])
params['n_epochs'] = n_epochs
params['duration'] -= n_times
params['hsel_patch'].set_width(params['duration'])
params['plot_fun']()
elif event.key == 'end':
n_epochs = params['n_epochs'] + 1
n_times = len(params['epochs'].times)
if n_times * n_epochs > len(params['data'][0]):
return
if params['t_start'] + n_times * n_epochs > len(params['data'][0]):
params['t_start'] -= n_times
params['hsel_patch'].set_x(params['t_start'])
ticks = params['epoch_times'] + 0.5 * n_times
params['ax2'].set_xticks(ticks[:n_epochs])
params['n_epochs'] = n_epochs
if len(params['vert_lines']) > 0:
ax = params['ax']
pos = params['vert_lines'][0][0].get_data()[0] + params['duration']
params['vert_lines'].append(ax.plot(pos, ax.get_ylim(), 'y',
zorder=3))
params['duration'] += n_times
if params['t_start'] + params['duration'] > len(params['data'][0]):
params['t_start'] -= n_times
params['hsel_patch'].set_x(params['t_start'])
params['hsel_patch'].set_width(params['duration'])
params['plot_fun']()
elif event.key == 'b':
if params['fig_options'] is not None:
plt.close(params['fig_options'])
params['fig_options'] = None
_prepare_butterfly(params)
_plot_traces(params)
elif event.key == 'o':
if not params['butterfly']:
_open_options(params)
elif event.key == 'h':
_plot_histogram(params)
elif event.key == '?':
_onclick_help(event, params)
elif event.key == 'escape':
plt.close(params['fig'])
def _prepare_butterfly(params):
"""Helper function for setting up butterfly plot."""
from matplotlib.collections import LineCollection
butterfly = not params['butterfly']
if butterfly:
types = set(['grad', 'mag', 'eeg', 'eog',
'ecg']) & set(params['types'])
if len(types) < 1:
return
params['ax_vscroll'].set_visible(False)
ax = params['ax']
labels = ax.yaxis.get_ticklabels()
for label in labels:
label.set_visible(True)
ylim = (5. * len(types), 0.)
ax.set_ylim(ylim)
offset = ylim[0] / (4. * len(types))
ticks = np.arange(0, ylim[0], offset)
ticks = [ticks[x] if x < len(ticks) else 0 for x in range(20)]
ax.set_yticks(ticks)
used_types = 0
params['offsets'] = [ticks[2]]
if 'grad' in types:
pos = (0, 1 - (ticks[2] / ylim[0]))
params['ax2'].annotate('Grad (fT/cm)', xy=pos, xytext=(-70, 0),
ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points')
used_types += 1
params['offsets'].append(ticks[2 + used_types * 4])
if 'mag' in types:
pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
params['ax2'].annotate('Mag (fT)', xy=pos, xytext=(-70, 0),
ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points')
used_types += 1
params['offsets'].append(ticks[2 + used_types * 4])
if 'eeg' in types:
pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
params['ax2'].annotate('EEG (uV)', xy=pos, xytext=(-70, 0),
ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points')
used_types += 1
params['offsets'].append(ticks[2 + used_types * 4])
if 'eog' in types:
pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
params['ax2'].annotate('EOG (uV)', xy=pos, xytext=(-70, 0),
ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points')
used_types += 1
params['offsets'].append(ticks[2 + used_types * 4])
if 'ecg' in types:
pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
params['ax2'].annotate('ECG (uV)', xy=pos, xytext=(-70, 0),
ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points')
used_types += 1
while len(params['lines']) < len(params['picks']):
lc = LineCollection(list(), antialiased=False, linewidths=0.5,
zorder=2, picker=3.)
ax.add_collection(lc)
params['lines'].append(lc)
else: # change back to default view
labels = params['ax'].yaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][0])
params['ax_vscroll'].set_visible(True)
while len(params['ax2'].texts) > 0:
params['ax2'].texts.pop()
n_channels = params['n_channels']
while len(params['lines']) > n_channels:
params['ax'].collections.pop()
params['lines'].pop()
ylim = (25., 0.)
params['ax'].set_ylim(ylim)
offset = ylim[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
params['ax'].set_yticks(params['offsets'])
params['butterfly'] = butterfly
def _onpick(event, params):
"""Helper to add a channel name on click"""
if event.mouseevent.button != 2 or not params['butterfly']:
return # text label added with a middle mouse button
lidx = np.where([l is event.artist for l in params['lines']])[0][0]
text = params['text']
text.set_x(event.mouseevent.xdata)
text.set_y(event.mouseevent.ydata)
text.set_text(params['ch_names'][lidx])
text.set_visible(True)
# do NOT redraw here, since for butterfly plots hundreds of lines could
# potentially be picked -- use _mouse_click (happens once per click)
# to do the drawing
def _close_event(event, params):
"""Function to drop selected bad epochs. Called on closing of the plot."""
params['epochs'].drop_epochs(params['bads'])
logger.info('Channels marked as bad: %s' % params['epochs'].info['bads'])
params['epochs'].info['bads'] = params['info']['bads']
def _resize_event(event, params):
"""Function to handle resize event"""
size = ','.join([str(s) for s in params['fig'].get_size_inches()])
set_config('MNE_BROWSE_RAW_SIZE', size)
_layout_figure(params)
def _update_channels_epochs(event, params):
"""Function for changing the amount of channels and epochs per view."""
from matplotlib.collections import LineCollection
# Channels
n_channels = int(np.around(params['channel_slider'].val))
offset = params['ax'].get_ylim()[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
while len(params['lines']) > n_channels:
params['ax'].collections.pop()
params['lines'].pop()
while len(params['lines']) < n_channels:
lc = LineCollection(list(), linewidths=0.5, antialiased=False,
zorder=2, picker=3.)
params['ax'].add_collection(lc)
params['lines'].append(lc)
params['ax'].set_yticks(params['offsets'])
params['vsel_patch'].set_height(n_channels)
params['n_channels'] = n_channels
# Epochs
n_epochs = int(np.around(params['epoch_slider'].val))
n_times = len(params['epochs'].times)
ticks = params['epoch_times'] + 0.5 * n_times
params['ax2'].set_xticks(ticks[:n_epochs])
params['n_epochs'] = n_epochs
params['duration'] = n_times * n_epochs
params['hsel_patch'].set_width(params['duration'])
if params['t_start'] + n_times * n_epochs > len(params['data'][0]):
params['t_start'] = len(params['data'][0]) - n_times * n_epochs
params['hsel_patch'].set_x(params['t_start'])
_plot_traces(params)
def _toggle_labels(label, params):
"""Function for toggling axis labels on/off."""
if label == 'Channel names visible':
params['settings'][0] = not params['settings'][0]
labels = params['ax'].yaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][0])
elif label == 'Event-id visible':
params['settings'][1] = not params['settings'][1]
labels = params['ax2'].xaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][1])
elif label == 'Epoch-id visible':
params['settings'][2] = not params['settings'][2]
labels = params['ax'].xaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][2])
elif label == 'Zeroline visible':
params['settings'][3] = not params['settings'][3]
_plot_vert_lines(params)
params['fig'].canvas.draw()
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
def _open_options(params):
"""Function for opening the option window."""
import matplotlib.pyplot as plt
import matplotlib as mpl
if params['fig_options'] is not None:
# turn off options dialog
plt.close(params['fig_options'])
params['fig_options'] = None
return
width = 10
height = 3
fig_options = figure_nobar(figsize=(width, height), dpi=80)
fig_options.canvas.set_window_title('View settings')
params['fig_options'] = fig_options
ax_channels = plt.axes([0.15, 0.1, 0.65, 0.1])
ax_epochs = plt.axes([0.15, 0.25, 0.65, 0.1])
ax_button = plt.axes([0.85, 0.1, 0.1, 0.25])
ax_check = plt.axes([0.15, 0.4, 0.4, 0.55])
plt.axis('off')
params['update_button'] = mpl.widgets.Button(ax_button, 'Update')
params['channel_slider'] = mpl.widgets.Slider(ax_channels, 'Channels', 1,
len(params['ch_names']),
valfmt='%0.0f',
valinit=params['n_channels'])
params['epoch_slider'] = mpl.widgets.Slider(ax_epochs, 'Epochs', 1,
len(params['epoch_times']),
valfmt='%0.0f',
valinit=params['n_epochs'])
params['checkbox'] = mpl.widgets.CheckButtons(ax_check,
['Channel names visible',
'Event-id visible',
'Epoch-id visible',
'Zeroline visible'],
actives=params['settings'])
update = partial(_update_channels_epochs, params=params)
params['update_button'].on_clicked(update)
labels_callback = partial(_toggle_labels, params=params)
params['checkbox'].on_clicked(labels_callback)
close_callback = partial(_settings_closed, params=params)
params['fig_options'].canvas.mpl_connect('close_event', close_callback)
try:
params['fig_options'].canvas.draw()
params['fig_options'].show()
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
except Exception:
pass
def _settings_closed(events, params):
"""Function to handle close event from settings dialog."""
params['fig_options'] = None
def _plot_histogram(params):
"""Function for plotting histogram of peak-to-peak values."""
import matplotlib.pyplot as plt
epochs = params['epochs']
p2p = np.ptp(epochs.get_data(), axis=2)
types = list()
data = list()
if 'eeg' in params['types']:
eegs = np.array([p2p.T[i] for i,
x in enumerate(params['types']) if x == 'eeg'])
data.append(eegs.ravel())
types.append('eeg')
if 'mag' in params['types']:
mags = np.array([p2p.T[i] for i,
x in enumerate(params['types']) if x == 'mag'])
data.append(mags.ravel())
types.append('mag')
if 'grad' in params['types']:
grads = np.array([p2p.T[i] for i,
x in enumerate(params['types']) if x == 'grad'])
data.append(grads.ravel())
types.append('grad')
fig = plt.figure(len(types))
fig.clf()
scalings = _handle_default('scalings')
units = _handle_default('units')
titles = _handle_default('titles')
colors = _handle_default('color')
for idx in range(len(types)):
ax = plt.subplot(len(types), 1, idx + 1)
plt.xlabel(units[types[idx]])
plt.ylabel('count')
color = colors[types[idx]]
rej = None
if epochs.reject is not None and types[idx] in epochs.reject.keys():
rej = epochs.reject[types[idx]] * scalings[types[idx]]
rng = [0., rej * 1.1]
else:
rng = None
plt.hist(data[idx] * scalings[types[idx]], bins=100, color=color,
range=rng)
if rej is not None:
ax.plot((rej, rej), (0, ax.get_ylim()[1]), color='r')
plt.title(titles[types[idx]])
fig.suptitle('Peak-to-peak histogram', y=0.99)
fig.subplots_adjust(hspace=0.6)
try:
fig.show()
    except Exception:
pass
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
def _label2idx(params, pos):
"""Aux function for click on labels. Returns channel name and idx."""
labels = params['ax'].yaxis.get_ticklabels()
offsets = np.array(params['offsets']) + params['offsets'][0]
line_idx = np.searchsorted(offsets, pos[1])
text = labels[line_idx].get_text()
if len(text) == 0:
return None, None
ch_idx = params['ch_start'] + line_idx
return text, ch_idx
| bsd-3-clause |
kimhungGCZ/combinedAL | run.py | 1 | 7745 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import argparse
import os
try:
import simplejson as json
except ImportError:
import json
from nab.runner import Runner
from nab.util import (detectorNameToClass, checkInputs)
def getDetectorClassConstructors(detectors):
"""
Takes in names of detectors. Collects class names that correspond to those
detectors and returns them in a dict. The dict maps detector name to class
names. Assumes the detectors have been imported.
"""
detectorConstructors = {
d : globals()[detectorNameToClass(d)] for d in detectors}
return detectorConstructors
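# For example (assuming the corresponding detector modules have been imported,
# as done at the bottom of this file), a hypothetical call would look like:
#
#   getDetectorClassConstructors(["numenta", "null"])
#   # -> {"numenta": NumentaDetector, "null": NullDetector}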
def main(args):
root = os.path.dirname(os.path.realpath(__file__))
numCPUs = int(args.numCPUs) if args.numCPUs is not None else None
dataDir = os.path.join(root, args.dataDir)
windowsFile = os.path.join(root, args.windowsFile)
resultsDir = os.path.join(root, args.resultsDir)
profilesFile = os.path.join(root, args.profilesFile)
thresholdsFile = os.path.join(root, args.thresholdsFile)
runner = Runner(dataDir=dataDir,
labelPath=windowsFile,
resultsDir=resultsDir,
profilesPath=profilesFile,
thresholdPath=thresholdsFile,
numCPUs=numCPUs)
runner.initialize()
if args.detect:
detectorConstructors = getDetectorClassConstructors(args.detectors)
runner.detect(detectorConstructors)
if args.optimize:
runner.optimize(args.detectors)
if args.score:
with open(args.thresholdsFile) as thresholdConfigFile:
detectorThresholds = json.load(thresholdConfigFile)
runner.score(args.detectors, detectorThresholds)
if args.normalize:
    try:
      runner.normalize()
    except AttributeError:
      print("Error: you must run the scoring step with the "
            "normalization step.")
      return
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--detect",
help="Generate detector results but do not analyze results "
"files.",
default=False,
action="store_true")
parser.add_argument("--optimize",
help="Optimize the thresholds for each detector and user "
"profile combination",
default=False,
action="store_true")
parser.add_argument("--score",
help="Analyze results in the results directory",
default=False,
action="store_true")
parser.add_argument("--normalize",
help="Normalize the final scores",
default=False,
action="store_true")
parser.add_argument("--skipConfirmation",
help="If specified will skip the user confirmation step",
default=False,
action="store_true")
parser.add_argument("--dataDir",
default="data",
help="This holds all the label windows for the corpus.")
parser.add_argument("--resultsDir",
default="results",
help="This will hold the results after running detectors "
"on the data")
parser.add_argument("--windowsFile",
default=os.path.join("labels", "combined_windows.json"),
help="JSON file containing ground truth labels for the "
"corpus.")
parser.add_argument("-d", "--detectors",
nargs="*",
type=str,
default=["null", "numenta", "numentaTM", "random",
"bayesChangePt", "windowedGaussian", "expose",
"relativeEntropy", "knncad","contextOSE","skyline"],
help="Comma separated list of detector(s) to use, e.g. "
"null,numenta")
parser.add_argument("-p", "--profilesFile",
default=os.path.join("config", "profiles.json"),
help="The configuration file to use while running the "
"benchmark.")
parser.add_argument("-t", "--thresholdsFile",
default=os.path.join("config", "thresholds.json"),
help="The configuration file that stores thresholds for "
"each combination of detector and username")
parser.add_argument("-n", "--numCPUs",
default=None,
help="The number of CPUs to use to run the "
"benchmark. If not specified all CPUs will be used.")
args = parser.parse_args()
if (not args.detect
and not args.optimize
and not args.score
and not args.normalize):
args.detect = True
args.optimize = True
args.score = True
args.normalize = True
if len(args.detectors) == 1:
    # Handle comma-separated list argument.
args.detectors = args.detectors[0].split(",")
# The following imports are necessary for getDetectorClassConstructors to
# automatically figure out the detector classes.
# Only import detectors if used so as to avoid unnecessary dependency.
if "bayesChangePt" in args.detectors:
from nab.detectors.bayes_changept.bayes_changept_detector import (
BayesChangePtDetector)
if "numenta" in args.detectors:
from nab.detectors.numenta.numenta_detector import NumentaDetector
if "htmjava" in args.detectors:
from nab.detectors.htmjava.htmjava_detector import HtmjavaDetector
if "numentaTM" in args.detectors:
from nab.detectors.numenta.numentaTM_detector import NumentaTMDetector
if "null" in args.detectors:
from nab.detectors.null.null_detector import NullDetector
if "random" in args.detectors:
from nab.detectors.random.random_detector import RandomDetector
if "skyline" in args.detectors:
from nab.detectors.skyline.skyline_detector import SkylineDetector
if "windowedGaussian" in args.detectors:
from nab.detectors.gaussian.windowedGaussian_detector import (
WindowedGaussianDetector)
if "knncad" in args.detectors:
from nab.detectors.knncad.knncad_detector import KnncadDetector
if "relativeEntropy" in args.detectors:
from nab.detectors.relative_entropy.relative_entropy_detector import (
RelativeEntropyDetector)
# To run expose detector, you must have sklearn version 0.16.1 installed.
# Higher versions of sklearn may not be compatible with numpy version 1.9.2
# required to run nupic.
if "expose" in args.detectors:
from nab.detectors.expose.expose_detector import ExposeDetector
if "contextOSE" in args.detectors:
from nab.detectors.context_ose.context_ose_detector import (
ContextOSEDetector )
if args.skipConfirmation or checkInputs(args):
main(args)
| agpl-3.0 |
oesteban/niworkflows | niworkflows/conftest.py | 2 | 1514 | """py.test configuration"""
import os
from sys import version_info
from pathlib import Path
import numpy as np
import nibabel as nb
import pandas as pd
import pytest
import tempfile
import pkg_resources
from .utils.bids import collect_data
test_data_env = os.getenv(
"TEST_DATA_HOME", str(Path.home() / ".cache" / "stanford-crn")
)
data_dir = Path(test_data_env) / "BIDS-examples-1-enh-ds054"
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
doctest_namespace["PY_VERSION"] = version_info
doctest_namespace["np"] = np
doctest_namespace["nb"] = nb
doctest_namespace["pd"] = pd
doctest_namespace["os"] = os
doctest_namespace["pytest"] = pytest
doctest_namespace["Path"] = Path
doctest_namespace["datadir"] = data_dir
doctest_namespace["bids_collect_data"] = collect_data
doctest_namespace["test_data"] = pkg_resources.resource_filename(
"niworkflows", "tests/data"
)
tmpdir = tempfile.TemporaryDirectory()
doctest_namespace["tmpdir"] = tmpdir.name
nifti_fname = str(Path(tmpdir.name) / "test.nii.gz")
nii = nb.Nifti1Image(np.random.random((5, 5)).astype("f4"), np.eye(4))
nii.header.set_qform(np.diag([1, 1, 1, 1]), code=1)
nii.header.set_sform(np.diag([-1, 1, 1, 1]), code=1)
nii.to_filename(nifti_fname)
doctest_namespace["nifti_fname"] = nifti_fname
cwd = os.getcwd()
os.chdir(tmpdir.name)
yield
os.chdir(cwd)
tmpdir.cleanup()
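# With this autouse fixture in place, doctests in the package can refer to the
# injected names directly. A hypothetical docstring example could read:
#
#     >>> nii = nb.load(nifti_fname)   # `nb` and `nifti_fname` come from here
#     >>> nii.shape
#     (5, 5)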
@pytest.fixture
def testdata_dir():
return data_dir
| bsd-3-clause |
anntzer/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 34 | 4082 | r"""
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1]_ and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1]_ algorithms is compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
masfaraud/genmechanics | setup.py | 1 | 4152 | # -*- coding: utf-8 -*-
"""
Setup install script for genmechanics
"""
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
from os.path import dirname, isdir, join
import re
from subprocess import CalledProcessError, check_output
tag_re = re.compile(r'\btag: v?([0-9][^,]*)\b')  # e.g. 'tag: v0.2.0' injected by git-archive
version_re = re.compile('^Version: (.+)$', re.M)
def version_from_git_describe(version):
if version[0]=='v':
version = version[1:]
# PEP 440 compatibility
number_commits_ahead = 0
if '-' in version:
version, number_commits_ahead, commit_hash = version.split('-')
number_commits_ahead = int(number_commits_ahead)
# print('number_commits_ahead', number_commits_ahead)
split_versions = version.split('.')
if 'post' in split_versions[-1]:
suffix = split_versions[-1]
split_versions = split_versions[:-1]
else:
suffix = None
for pre_release_segment in ['a', 'b', 'rc']:
if pre_release_segment in split_versions[-1]:
if number_commits_ahead > 0:
split_versions[-1] = str(split_versions[-1].split(pre_release_segment)[0])
if len(split_versions) == 2:
split_versions.append('0')
if len(split_versions) == 1:
split_versions.extend(['0', '0'])
split_versions[-1] = str(int(split_versions[-1])+1)
future_version = '.'.join(split_versions)
return '{}.dev{}'.format(future_version, number_commits_ahead)
else:
return '.'.join(split_versions)
if number_commits_ahead > 0:
if len(split_versions) == 2:
split_versions.append('0')
if len(split_versions) == 1:
split_versions.extend(['0', '0'])
split_versions[-1] = str(int(split_versions[-1])+1)
split_versions = '.'.join(split_versions)
return '{}.dev{}'.format(split_versions, number_commits_ahead)
else:
if suffix is not None:
split_versions.append(suffix)
return '.'.join(split_versions)
# Just testing if get_version works well
assert version_from_git_describe('v0.1.7.post2') == '0.1.7.post2'
assert version_from_git_describe('v0.0.1-25-gaf0bf53') == '0.0.2.dev25'
assert version_from_git_describe('v0.1-15-zsdgaz') == '0.1.1.dev15'
assert version_from_git_describe('v1') == '1'
assert version_from_git_describe('v1-3-aqsfjbo') == '1.0.1.dev3'
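# One more check of the dev-version mapping: a checkout 4 commits past tag
# v0.2.0 ('git describe' -> 'v0.2.0-4-gabc1234', commit hash made up here)
# maps to the next patch pre-version:
assert version_from_git_describe('v0.2.0-4-gabc1234') == '0.2.1.dev4'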
def get_version():
# Return the version if it has been injected into the file by git-archive
version = tag_re.search('$Format:%D$')
if version:
return version.group(1)
d = dirname(__file__)
if isdir(join(d, '.git')):
cmd = 'git describe --tags'
try:
version = check_output(cmd.split()).decode().strip()[:]
except CalledProcessError:
raise RuntimeError('Unable to get version number from git tags')
return version_from_git_describe(version)
else:
# Extract the version from the PKG-INFO file.
with open(join(d, 'PKG-INFO')) as f:
version = version_re.search(f.read()).group(1)
# print('version', version)
return version
setup(name='genmechanics',
# use_scm_version={'write_to':'genmechanics/version.py'},
# setup_requires=['setuptools_scm'],
version = get_version(),
description='General mechanics solver for python',
long_description=readme(),
keywords='General mechanics',
url='https://github.com/Dessia-tech/genmechanics',
author='Steven Masfaraud',
author_email='[email protected]',
license='Creative Commons Attribution-Share Alike license',
include_package_data = True,
packages=['genmechanics'],
package_dir={'genmechanics': 'genmechanics'},
install_requires=['numpy', 'matplotlib', 'networkx', 'scipy',
'volmdlr>=0.2.0', 'cma'],
classifiers=['Topic :: Scientific/Engineering','Development Status :: 3 - Alpha'])
| gpl-3.0 |
detrout/debian-statsmodels | statsmodels/sandbox/distributions/examples/ex_mvelliptical.py | 34 | 5169 | # -*- coding: utf-8 -*-
"""examples for multivariate normal and t distributions
Created on Fri Jun 03 16:00:26 2011
@author: josef
for comparison I used R mvtnorm version 0.9-96
"""
from __future__ import print_function
import numpy as np
import statsmodels.sandbox.distributions.mv_normal as mvd
from numpy.testing import assert_array_almost_equal
cov3 = np.array([[ 1. , 0.5 , 0.75],
[ 0.5 , 1.5 , 0.6 ],
[ 0.75, 0.6 , 2. ]])
mu = np.array([-1, 0.0, 2.0])
#************** multivariate normal distribution ***************
mvn3 = mvd.MVNormal(mu, cov3)
#compare with random sample
x = mvn3.rvs(size=1000000)
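# large Monte Carlo sample, used below to cross-check cdf values against R's mvtnorm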
xli = [[2., 1., 1.5],
[0., 2., 1.5],
[1.5, 1., 2.5],
[0., 1., 1.5]]
xliarr = np.asarray(xli).T[None,:, :]
#from R session
#pmvnorm(lower=-Inf,upper=(x[0,.]-mu)/sqrt(diag(cov3)),mean=rep(0,3),corr3)
r_cdf = [0.3222292, 0.3414643, 0.5450594, 0.3116296]
r_cdf_errors = [1.715116e-05, 1.590284e-05, 5.356471e-05, 3.567548e-05]
n_cdf = [mvn3.cdf(a) for a in xli]
assert_array_almost_equal(r_cdf, n_cdf, decimal=4)
print(n_cdf)
print('')
print((x<np.array(xli[0])).all(-1).mean(0))
print((x[...,None]<xliarr).all(1).mean(0))
print(mvn3.expect_mc(lambda x: (x<xli[0]).all(-1), size=100000))
print(mvn3.expect_mc(lambda x: (x[...,None]<xliarr).all(1), size=100000))
#other methods
mvn3n = mvn3.normalized()
assert_array_almost_equal(mvn3n.cov, mvn3n.corr, decimal=15)
assert_array_almost_equal(mvn3n.mean, np.zeros(3), decimal=15)
xn = mvn3.normalize(x)
xn_cov = np.cov(xn, rowvar=0)
assert_array_almost_equal(mvn3n.cov, xn_cov, decimal=2)
assert_array_almost_equal(np.zeros(3), xn.mean(0), decimal=2)
mvn3n2 = mvn3.normalized2()
assert_array_almost_equal(mvn3n.cov, mvn3n2.cov, decimal=2)
#mistake: "normalized2" standardizes - FIXED
#assert_array_almost_equal(np.eye(3), mvn3n2.cov, decimal=2)
xs = mvn3.standardize(x)
xs_cov = np.cov(xn, rowvar=0)
#another mixup xs is normalized
#assert_array_almost_equal(np.eye(3), xs_cov, decimal=2)
assert_array_almost_equal(mvn3.corr, xs_cov, decimal=2)
assert_array_almost_equal(np.zeros(3), xs.mean(0), decimal=2)
mv2m = mvn3.marginal(np.array([0,1]))
print(mv2m.mean)
print(mv2m.cov)
mv2c = mvn3.conditional(np.array([0,1]), [0])
print(mv2c.mean)
print(mv2c.cov)
mv2c = mvn3.conditional(np.array([0]), [0, 0])
print(mv2c.mean)
print(mv2c.cov)
import statsmodels.api as sm
mod = sm.OLS(x[:,0], sm.add_constant(x[:,1:], prepend=True))
res = mod.fit()
print(res.model.predict(np.array([1,0,0])))
mv2c = mvn3.conditional(np.array([0]), [0, 0])
print(mv2c.mean)
mv2c = mvn3.conditional(np.array([0]), [1, 1])
print(res.model.predict(np.array([1,1,1])))
print(mv2c.mean)
#the following wrong input doesn't raise an exception but produces wrong numbers
#mv2c = mvn3.conditional(np.array([0]), [[1, 1],[2,2]])
#************** multivariate t distribution ***************
mvt3 = mvd.MVT(mu, cov3, 4)
xt = mvt3.rvs(size=100000)
assert_array_almost_equal(mvt3.cov, np.cov(xt, rowvar=0), decimal=1)
mvt3s = mvt3.standardized()
mvt3n = mvt3.normalized()
#the following should be equal or correct up to numerical precision of float
assert_array_almost_equal(mvt3.corr, mvt3n.sigma, decimal=15)
assert_array_almost_equal(mvt3n.corr, mvt3n.sigma, decimal=15)
assert_array_almost_equal(np.eye(3), mvt3s.sigma, decimal=15)
xts = mvt3.standardize(xt)
xts_cov = np.cov(xts, rowvar=0)
xtn = mvt3.normalize(xt)
xtn_cov = np.cov(xtn, rowvar=0)
xtn_corr = np.corrcoef(xtn, rowvar=0)
assert_array_almost_equal(mvt3n.mean, xtn.mean(0), decimal=2)
#the following might fail sometimes (random test), add seed in tests
assert_array_almost_equal(mvt3n.corr, xtn_corr, decimal=1)
#watch out cov is not the same as sigma for t distribution, what's right here?
#normalize by sigma or by cov ? now normalized by sigma
assert_array_almost_equal(mvt3n.cov, xtn_cov, decimal=1)
assert_array_almost_equal(mvt3s.cov, xts_cov, decimal=1)
a = [0.0, 1.0, 1.5]
mvt3_cdf0 = mvt3.cdf(a)
print(mvt3_cdf0)
print((xt<np.array(a)).all(-1).mean(0))
print('R', 0.3026741) # "error": 0.0004832187
print('R', 0.3026855) # error 3.444375e-06 with smaller abseps
print('diff', mvt3_cdf0 - 0.3026855)
a = [0.0, 0.5, 1.0]
mvt3_cdf1 = mvt3.cdf(a)
print(mvt3_cdf1)
print((xt<np.array(a)).all(-1).mean(0))
print('R', 0.1946621) # "error": 0.0002524817
print('R', 0.1946217) # "error": 2.748699e-06 with smaller abseps
print('diff', mvt3_cdf1 - 0.1946217)
assert_array_almost_equal(mvt3_cdf0, 0.3026855, decimal=5)
assert_array_almost_equal(mvt3_cdf1, 0.1946217, decimal=5)
import statsmodels.distributions.mixture_rvs as mix
mu2 = np.array([4, 2.0, 2.0])
mvn32 = mvd.MVNormal(mu2, cov3/2., 4)
md = mix.mv_mixture_rvs([0.4, 0.6], 5, [mvt3, mvt3n], 3)
rvs = mix.mv_mixture_rvs([0.4, 0.6], 2000, [mvn3, mvn32], 3)
#rvs2 = rvs[:,:2]
import matplotlib.pyplot as plt
fig = plt.figure()
fig.add_subplot(2, 2, 1)
plt.plot(rvs[:,0], rvs[:,1], '.', alpha=0.25)
plt.title('1 versus 0')
fig.add_subplot(2, 2, 2)
plt.plot(rvs[:,0], rvs[:,2], '.', alpha=0.25)
plt.title('2 versus 0')
fig.add_subplot(2, 2, 3)
plt.plot(rvs[:,1], rvs[:,2], '.', alpha=0.25)
plt.title('2 versus 1')
#plt.show()
| bsd-3-clause |
srowen/spark | python/pyspark/pandas/mlflow.py | 9 | 8547 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
MLflow-related functions to load models and apply them to pandas-on-Spark dataframes.
"""
from typing import List, Union # noqa: F401 (SPARK-34943)
from pyspark.sql.types import DataType
import pandas as pd
import numpy as np
from typing import Any
from pyspark.pandas._typing import Label # noqa: F401 (SPARK-34943)
from pyspark.pandas.utils import lazy_property, default_session
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.series import Series, first_series
from pyspark.pandas.typedef import as_spark_type
__all__ = ["PythonModelWrapper", "load_model"]
class PythonModelWrapper(object):
"""
A wrapper around MLflow's Python object model.
This wrapper acts as a predictor on pandas-on-Spark
"""
def __init__(self, model_uri: str, return_type_hint: str):
self._model_uri = model_uri
self._return_type_hint = return_type_hint
@lazy_property
def _return_type(self) -> DataType:
hint = self._return_type_hint
# The logic is simple for now, because it corresponds to the default
# case: continuous predictions
# TODO: do something smarter, for example when there is a sklearn.Classifier (it should
# return an integer or a categorical)
# We can do the same for pytorch/tensorflow/keras models by looking at the output types.
# However, this is probably better done in mlflow than here.
if hint == "infer" or not hint:
hint = np.float64
return as_spark_type(hint)
@lazy_property
def _model(self) -> Any:
"""
The return object has to follow the API of mlflow.pyfunc.PythonModel.
"""
from mlflow import pyfunc
return pyfunc.load_model(model_uri=self._model_uri)
@lazy_property
def _model_udf(self) -> Any:
from mlflow import pyfunc
spark = default_session()
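        # wrap the pyfunc model as a Spark UDF so it can be applied to Spark columns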
return pyfunc.spark_udf(spark, model_uri=self._model_uri, result_type=self._return_type)
def __str__(self) -> str:
return "PythonModelWrapper({})".format(str(self._model))
def __repr__(self) -> str:
return "PythonModelWrapper({})".format(repr(self._model))
def predict(self, data: Union[DataFrame, pd.DataFrame]) -> Union[Series, pd.Series]:
"""
Returns a prediction on the data.
If the data is a pandas-on-Spark DataFrame, the return is a pandas-on-Spark Series.
If the data is a pandas Dataframe, the return is the expected output of the underlying
pyfunc object (typically a pandas Series or a numpy array).
"""
if isinstance(data, pd.DataFrame):
return self._model.predict(data)
elif isinstance(data, DataFrame):
return_col = self._model_udf(*data._internal.data_spark_columns)
# TODO: the columns should be named according to the mlflow spec
# However, this is only possible with spark >= 3.0
# s = F.struct(*data.columns)
# return_col = self._model_udf(s)
column_labels = [
(col,) for col in data._internal.spark_frame.select(return_col).columns
] # type: List[Label]
internal = data._internal.copy(
column_labels=column_labels, data_spark_columns=[return_col], data_fields=None
)
return first_series(DataFrame(internal))
else:
raise ValueError("unknown data type: {}".format(type(data).__name__))
def load_model(model_uri: str, predict_type: str = "infer") -> PythonModelWrapper:
"""
Loads an MLflow model into an wrapper that can be used both for pandas and pandas-on-Spark
DataFrame.
Parameters
----------
model_uri : str
URI pointing to the model. See MLflow documentation for more details.
predict_type : a python basic type, a numpy basic type, a Spark type or 'infer'.
This is the return type that is expected when calling the predict function of the model.
If 'infer' is specified, the wrapper will attempt to determine automatically the return type
based on the model type.
Returns
-------
PythonModelWrapper
A wrapper around MLflow PythonModel objects. This wrapper is expected to adhere to the
interface of mlflow.pyfunc.PythonModel.
Examples
--------
Here is a full example that creates a model with scikit-learn and saves the model with
MLflow. The model is then loaded as a predictor that can be applied on a pandas-on-Spark
Dataframe.
We first initialize our MLflow environment:
>>> from mlflow.tracking import MlflowClient, set_tracking_uri
>>> import mlflow.sklearn
>>> from tempfile import mkdtemp
>>> d = mkdtemp("pandas_on_spark_mlflow")
>>> set_tracking_uri("file:%s"%d)
>>> client = MlflowClient()
>>> exp = mlflow.create_experiment("my_experiment")
>>> mlflow.set_experiment("my_experiment")
We aim at learning this numerical function using a simple linear regressor.
>>> from sklearn.linear_model import LinearRegression
>>> train = pd.DataFrame({"x1": np.arange(8), "x2": np.arange(8)**2,
... "y": np.log(2 + np.arange(8))})
>>> train_x = train[["x1", "x2"]]
>>> train_y = train[["y"]]
>>> with mlflow.start_run():
... lr = LinearRegression()
... lr.fit(train_x, train_y)
... mlflow.sklearn.log_model(lr, "model")
LinearRegression(...)
Now that our model is logged using MLflow, we load it back and apply it on a pandas-on-Spark
dataframe:
>>> from pyspark.pandas.mlflow import load_model
>>> run_info = client.list_run_infos(exp)[-1]
>>> model = load_model("runs:/{run_id}/model".format(run_id=run_info.run_uuid))
>>> prediction_df = ps.DataFrame({"x1": [2.0], "x2": [4.0]})
>>> prediction_df["prediction"] = model.predict(prediction_df)
>>> prediction_df
x1 x2 prediction
0 2.0 4.0 1.355551
The model also works on pandas DataFrames as expected:
>>> model.predict(prediction_df[["x1", "x2"]].to_pandas())
array([[1.35555142]])
Notes
-----
Currently, the model prediction can only be merged back with the existing dataframe.
Other columns have to be manually joined.
For example, this code will not work:
>>> df = ps.DataFrame({"x1": [2.0], "x2": [3.0], "z": [-1]})
>>> features = df[["x1", "x2"]]
>>> y = model.predict(features)
>>> # Works:
>>> features["y"] = y # doctest: +SKIP
>>> # Will fail with a message about dataframes not aligned.
>>> df["y"] = y # doctest: +SKIP
A current workaround is to use the .merge() function, using the feature values
as merging keys.
>>> features['y'] = y
>>> everything = df.merge(features, on=['x1', 'x2'])
>>> everything
x1 x2 z y
0 2.0 3.0 -1 1.376932
"""
return PythonModelWrapper(model_uri, predict_type)
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.mlflow
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.mlflow.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]").appName("pyspark.pandas.mlflow tests").getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.mlflow,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
try:
import mlflow # noqa: F401
import sklearn # noqa: F401
_test()
except ImportError:
pass
| apache-2.0 |
jorge2703/scikit-learn | examples/cluster/plot_segmentation_toy.py | 258 | 3336 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we take it weakly
# dependent on the gradient, so the segmentation is close to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/matplotlib/backends/backend_cocoaagg.py | 11 | 9980 | """
backend_cocoaagg.py
A native Cocoa backend via PyObjC in OSX.
Author: Charles Moad ([email protected])
Notes:
- Requires PyObjC (currently testing v1.3.7)
- The Tk backend works nicely on OSX. This code
primarily serves as an example of embedding a
matplotlib rendering context into a cocoa app
using a NSImageView.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import os, sys
try:
import objc
except ImportError:
    raise ImportError('The CocoaAgg backend requires PyObjC to be installed!')
from Foundation import *
from AppKit import *
from PyObjCTools import NibClassBuilder, AppHelper
from matplotlib import cbook
cbook.warn_deprecated(
'1.3',
message="The CocoaAgg backend is not a fully-functioning backend. "
"It may be removed in matplotlib 1.4.")
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backend_bases import FigureManagerBase, FigureCanvasBase
from matplotlib.backend_bases import ShowBase
from .backend_agg import FigureCanvasAgg
from matplotlib._pylab_helpers import Gcf
mplBundle = NSBundle.bundleWithPath_(os.path.dirname(__file__))
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasCocoaAgg(figure)
return FigureManagerCocoaAgg(canvas, num)
## Below is the original show() function:
#def show():
# for manager in Gcf.get_all_fig_managers():
# manager.show()
#
## It appears that this backend is unusual in having a separate
## run function invoked for each figure, instead of a single
## mainloop. Presumably there is no blocking at all.
##
## Using the Show class below should cause no difference in
## behavior.
class Show(ShowBase):
def mainloop(self):
pass
show = Show()
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.show()
class FigureCanvasCocoaAgg(FigureCanvasAgg):
def draw(self):
FigureCanvasAgg.draw(self)
def blit(self, bbox):
pass
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
NibClassBuilder.extractClasses('Matplotlib.nib', mplBundle)
class MatplotlibController(NibClassBuilder.AutoBaseClass):
# available outlets:
# NSWindow plotWindow
# PlotView plotView
def awakeFromNib(self):
# Get a reference to the active canvas
NSApp().setDelegate_(self)
self.app = NSApp()
self.canvas = Gcf.get_active().canvas
self.plotView.canvas = self.canvas
self.canvas.plotView = self.plotView
self.plotWindow.setAcceptsMouseMovedEvents_(True)
self.plotWindow.makeKeyAndOrderFront_(self)
self.plotWindow.setDelegate_(self)#.plotView)
self.plotView.setImageFrameStyle_(NSImageFrameGroove)
self.plotView.image_ = NSImage.alloc().initWithSize_((0,0))
self.plotView.setImage_(self.plotView.image_)
# Make imageview first responder for key events
self.plotWindow.makeFirstResponder_(self.plotView)
# Force the first update
self.plotView.windowDidResize_(self)
def windowDidResize_(self, sender):
self.plotView.windowDidResize_(sender)
def windowShouldClose_(self, sender):
#NSApplication.sharedApplication().stop_(self)
self.app.stop_(self)
return objc.YES
def saveFigure_(self, sender):
p = NSSavePanel.savePanel()
if(p.runModal() == NSFileHandlingPanelOKButton):
self.canvas.print_figure(p.filename())
def printFigure_(self, sender):
op = NSPrintOperation.printOperationWithView_(self.plotView)
op.runOperation()
class PlotWindow(NibClassBuilder.AutoBaseClass):
pass
class PlotView(NibClassBuilder.AutoBaseClass):
def updatePlot(self):
w,h = self.canvas.get_width_height()
# Remove all previous images
for i in xrange(self.image_.representations().count()):
self.image_.removeRepresentation_(self.image_.representations().objectAtIndex_(i))
self.image_.setSize_((w,h))
brep = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
(self.canvas.buffer_rgba(),'','','',''), # Image data
w, # width
h, # height
            8, # bits per sample
4, # components per pixel
True, # has alpha?
False, # is planar?
NSCalibratedRGBColorSpace, # color space
w*4, # row bytes
32) # bits per pixel
self.image_.addRepresentation_(brep)
self.setNeedsDisplay_(True)
def windowDidResize_(self, sender):
w,h = self.bounds().size
dpi = self.canvas.figure.dpi
self.canvas.figure.set_size_inches(w / dpi, h / dpi)
self.canvas.draw()
self.updatePlot()
def mouseDown_(self, event):
dblclick = (event.clickCount() == 2)
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
type = event.type()
if (type == NSLeftMouseDown):
button = 1
else:
print('Unknown mouse event type:', type, file=sys.stderr)
button = -1
self.canvas.button_press_event(loc.x, loc.y, button, dblclick=dblclick)
self.updatePlot()
def mouseDragged_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
self.canvas.motion_notify_event(loc.x, loc.y)
self.updatePlot()
def mouseUp_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
type = event.type()
if (type == NSLeftMouseUp):
button = 1
else:
print('Unknown mouse event type:', type, file=sys.stderr)
button = -1
self.canvas.button_release_event(loc.x, loc.y, button)
self.updatePlot()
def keyDown_(self, event):
self.canvas.key_press_event(event.characters())
self.updatePlot()
def keyUp_(self, event):
self.canvas.key_release_event(event.characters())
self.updatePlot()
class MPLBootstrap(NSObject):
# Loads the nib containing the PlotWindow and PlotView
def startWithBundle_(self, bundle):
#NSApplicationLoad()
if not bundle.loadNibFile_externalNameTable_withZone_('Matplotlib.nib', {}, None):
print('Unable to load Matplotlib Cocoa UI!', file=sys.stderr)
sys.exit()
class FigureManagerCocoaAgg(FigureManagerBase):
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
try:
WMEnable('Matplotlib')
except:
# MULTIPLE FIGURES ARE BUGGY!
pass # If there are multiple figures we only need to enable once
#self.bootstrap = MPLBootstrap.alloc().init().performSelectorOnMainThread_withObject_waitUntilDone_(
# 'startWithBundle:',
# mplBundle,
# False)
def show(self):
# Load a new PlotWindow
self.bootstrap = MPLBootstrap.alloc().init().performSelectorOnMainThread_withObject_waitUntilDone_(
'startWithBundle:',
mplBundle,
False)
NSApplication.sharedApplication().run()
#### Everything below taken from PyObjC examples
#### This is a hack to allow python scripts to access
#### the window manager without running pythonw.
def S(*args):
return ''.join(args)
OSErr = objc._C_SHT
OUTPSN = 'o^{ProcessSerialNumber=LL}'
INPSN = 'n^{ProcessSerialNumber=LL}'
FUNCTIONS=[
# These two are public API
( 'GetCurrentProcess', S(OSErr, OUTPSN) ),
( 'SetFrontProcess', S(OSErr, INPSN) ),
# This is undocumented SPI
( 'CPSSetProcessName', S(OSErr, INPSN, objc._C_CHARPTR) ),
( 'CPSEnableForegroundOperation', S(OSErr, INPSN) ),
]
def WMEnable(name='Python'):
if isinstance(name, six.text_type):
name = name.encode('utf8')
mainBundle = NSBundle.mainBundle()
bPath = os.path.split(os.path.split(os.path.split(sys.executable)[0])[0])[0]
if mainBundle.bundlePath() == bPath:
return True
bndl = NSBundle.bundleWithPath_(objc.pathForFramework('/System/Library/Frameworks/ApplicationServices.framework'))
if bndl is None:
print('ApplicationServices missing', file=sys.stderr)
return False
d = {}
objc.loadBundleFunctions(bndl, d, FUNCTIONS)
for (fn, sig) in FUNCTIONS:
if fn not in d:
print('Missing', fn, file=sys.stderr)
return False
err, psn = d['GetCurrentProcess']()
if err:
print('GetCurrentProcess', (err, psn), file=sys.stderr)
return False
err = d['CPSSetProcessName'](psn, name)
if err:
print('CPSSetProcessName', (err, psn), file=sys.stderr)
return False
err = d['CPSEnableForegroundOperation'](psn)
if err:
#print >>sys.stderr, 'CPSEnableForegroundOperation', (err, psn)
return False
err = d['SetFrontProcess'](psn)
if err:
print('SetFrontProcess', (err, psn), file=sys.stderr)
return False
return True
FigureCanvas = FigureCanvasCocoaAgg
FigureManager = FigureManagerCocoaAgg
| gpl-2.0 |
loretoparisi/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_qt4.py | 69 | 20664 | from __future__ import division
import math
import os
import sys
import matplotlib
from matplotlib import verbose
from matplotlib.cbook import is_string_like, onetrue
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, IdleEvent, cursors
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.widgets import SubplotTool
try:
from PyQt4 import QtCore, QtGui, Qt
except ImportError:
raise ImportError("Qt4 backend requires that PyQt4 is installed.")
backend_version = "0.9.1"
def fn_name(): return sys._getframe(1).f_code.co_name
DEBUG = False
cursord = {
cursors.MOVE : QtCore.Qt.SizeAllCursor,
cursors.HAND : QtCore.Qt.PointingHandCursor,
cursors.POINTER : QtCore.Qt.ArrowCursor,
cursors.SELECT_REGION : QtCore.Qt.CrossCursor,
}
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager != None:
figManager.canvas.draw()
def _create_qApp():
"""
Only one qApp can exist at a time, so check before creating one.
"""
if QtGui.QApplication.startingUp():
if DEBUG: print "Starting up QApplication"
global qApp
qApp = QtGui.QApplication( [" "] )
QtCore.QObject.connect( qApp, QtCore.SIGNAL( "lastWindowClosed()" ),
qApp, QtCore.SLOT( "quit()" ) )
#remember that matplotlib created the qApp - will be used by show()
_create_qApp.qAppCreatedHere = True
_create_qApp.qAppCreatedHere = False
def show():
"""
Show all the figures and enter the qt main loop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.window.show()
if DEBUG: print 'Inside show'
figManager = Gcf.get_active()
if figManager != None:
figManager.canvas.draw()
if _create_qApp.qAppCreatedHere:
QtGui.qApp.exec_()
def new_figure_manager( num, *args, **kwargs ):
"""
Create a new figure manager instance
"""
thisFig = Figure( *args, **kwargs )
canvas = FigureCanvasQT( thisFig )
manager = FigureManagerQT( canvas, num )
return manager
class FigureCanvasQT( QtGui.QWidget, FigureCanvasBase ):
keyvald = { QtCore.Qt.Key_Control : 'control',
QtCore.Qt.Key_Shift : 'shift',
QtCore.Qt.Key_Alt : 'alt',
}
    # map Qt buttons (left=1, right=2, middle=4) to MPL buttons (left=1, middle=2, right=3)
buttond = {1:1, 2:3, 4:2}
def __init__( self, figure ):
if DEBUG: print 'FigureCanvasQt: ', figure
_create_qApp()
QtGui.QWidget.__init__( self )
FigureCanvasBase.__init__( self, figure )
self.figure = figure
self.setMouseTracking( True )
# hide until we can test and fix
#self.startTimer(backend_IdleEvent.milliseconds)
w,h = self.get_width_height()
self.resize( w, h )
def __timerEvent(self, event):
# hide until we can test and fix
self.mpl_idle_event(event)
def enterEvent(self, event):
FigureCanvasBase.enter_notify_event(self, event)
def leaveEvent(self, event):
FigureCanvasBase.leave_notify_event(self, event)
def mousePressEvent( self, event ):
x = event.pos().x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.pos().y()
button = self.buttond[event.button()]
FigureCanvasBase.button_press_event( self, x, y, button )
if DEBUG: print 'button pressed:', event.button()
def mouseMoveEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
FigureCanvasBase.motion_notify_event( self, x, y )
#if DEBUG: print 'mouse move'
def mouseReleaseEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
button = self.buttond[event.button()]
FigureCanvasBase.button_release_event( self, x, y, button )
if DEBUG: print 'button released'
def keyPressEvent( self, event ):
key = self._get_key( event )
FigureCanvasBase.key_press_event( self, key )
if DEBUG: print 'key press', key
def keyReleaseEvent( self, event ):
key = self._get_key(event)
FigureCanvasBase.key_release_event( self, key )
if DEBUG: print 'key release', key
def resizeEvent( self, event ):
if DEBUG: print 'resize (%d x %d)' % (event.size().width(), event.size().height())
QtGui.QWidget.resizeEvent( self, event )
w = event.size().width()
h = event.size().height()
if DEBUG: print "FigureCanvasQtAgg.resizeEvent(", w, ",", h, ")"
dpival = self.figure.dpi
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches( winch, hinch )
self.draw()
def resize( self, w, h ):
# Pass through to Qt to resize the widget.
QtGui.QWidget.resize( self, w, h )
# Resize the figure by converting pixels to inches.
pixelPerInch = self.figure.dpi
wInch = w / pixelPerInch
hInch = h / pixelPerInch
self.figure.set_size_inches( wInch, hInch )
# Redraw everything.
self.draw()
def sizeHint( self ):
w, h = self.get_width_height()
return QtCore.QSize( w, h )
    def minimumSizeHint( self ):
return QtCore.QSize( 10, 10 )
def _get_key( self, event ):
if event.key() < 256:
key = str(event.text())
elif event.key() in self.keyvald:
key = self.keyvald[ event.key() ]
else:
key = None
return key
def flush_events(self):
Qt.qApp.processEvents()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerQT( FigureManagerBase ):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The qt.QToolBar
window : The qt.QMainWindow
"""
def __init__( self, canvas, num ):
if DEBUG: print 'FigureManagerQT.%s' % fn_name()
FigureManagerBase.__init__( self, canvas, num )
self.canvas = canvas
self.window = QtGui.QMainWindow()
self.window.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.window.setWindowTitle("Figure %d" % num)
image = os.path.join( matplotlib.rcParams['datapath'],'images','matplotlib.png' )
self.window.setWindowIcon(QtGui.QIcon( image ))
# Give the keyboard focus to the figure instead of the manager
self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
self.canvas.setFocus()
QtCore.QObject.connect( self.window, QtCore.SIGNAL( 'destroyed()' ),
self._widgetclosed )
self.window._destroying = False
self.toolbar = self._get_toolbar(self.canvas, self.window)
self.window.addToolBar(self.toolbar)
QtCore.QObject.connect(self.toolbar, QtCore.SIGNAL("message"),
self.window.statusBar().showMessage)
self.window.setCentralWidget(self.canvas)
if matplotlib.is_interactive():
self.window.show()
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.window.show()
def notify_axes_change( fig ):
# This will be called whenever the current axes is changed
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver( notify_axes_change )
def _widgetclosed( self ):
if self.window._destroying: return
self.window._destroying = True
Gcf.destroy(self.num)
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'classic':
print "Classic toolbar is not supported"
elif matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2QT(canvas, parent, False)
else:
toolbar = None
return toolbar
def resize(self, width, height):
'set the canvas size in pixels'
self.window.resize(width, height)
def destroy( self, *args ):
if self.window._destroying: return
self.window._destroying = True
QtCore.QObject.disconnect( self.window, QtCore.SIGNAL( 'destroyed()' ),
self._widgetclosed )
if self.toolbar: self.toolbar.destroy()
if DEBUG: print "destroy figure manager"
self.window.close()
def set_window_title(self, title):
self.window.setWindowTitle(title)
class NavigationToolbar2QT( NavigationToolbar2, QtGui.QToolBar ):
def __init__(self, canvas, parent, coordinates=True):
""" coordinates: should we show the coordinates on the right? """
self.canvas = canvas
self.coordinates = coordinates
QtGui.QToolBar.__init__( self, parent )
NavigationToolbar2.__init__( self, canvas )
def _icon(self, name):
return QtGui.QIcon(os.path.join(self.basedir, name))
def _init_toolbar(self):
self.basedir = os.path.join(matplotlib.rcParams[ 'datapath' ],'images')
a = self.addAction(self._icon('home.svg'), 'Home', self.home)
a.setToolTip('Reset original view')
a = self.addAction(self._icon('back.svg'), 'Back', self.back)
a.setToolTip('Back to previous view')
a = self.addAction(self._icon('forward.svg'), 'Forward', self.forward)
a.setToolTip('Forward to next view')
self.addSeparator()
a = self.addAction(self._icon('move.svg'), 'Pan', self.pan)
a.setToolTip('Pan axes with left mouse, zoom with right')
a = self.addAction(self._icon('zoom_to_rect.svg'), 'Zoom', self.zoom)
a.setToolTip('Zoom to rectangle')
self.addSeparator()
a = self.addAction(self._icon('subplots.png'), 'Subplots',
self.configure_subplots)
a.setToolTip('Configure subplots')
a = self.addAction(self._icon('filesave.svg'), 'Save',
self.save_figure)
a.setToolTip('Save the figure')
self.buttons = {}
# Add the x,y location widget at the right side of the toolbar
# The stretch factor is 1 which means any resizing of the toolbar
# will resize this label instead of the buttons.
if self.coordinates:
self.locLabel = QtGui.QLabel( "", self )
self.locLabel.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTop )
self.locLabel.setSizePolicy(
QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Ignored))
labelAction = self.addWidget(self.locLabel)
labelAction.setVisible(True)
# reference holder for subplots_adjust window
self.adj_window = None
def dynamic_update( self ):
self.canvas.draw()
def set_message( self, s ):
self.emit(QtCore.SIGNAL("message"), s)
if self.coordinates:
self.locLabel.setText(s.replace(', ', '\n'))
def set_cursor( self, cursor ):
if DEBUG: print 'Set cursor' , cursor
QtGui.QApplication.restoreOverrideCursor()
QtGui.QApplication.setOverrideCursor( QtGui.QCursor( cursord[cursor] ) )
def draw_rubberband( self, event, x0, y0, x1, y1 ):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
self.canvas.drawRectangle( rect )
def configure_subplots(self):
self.adj_window = QtGui.QMainWindow()
win = self.adj_window
win.setAttribute(QtCore.Qt.WA_DeleteOnClose)
win.setWindowTitle("Subplot Configuration Tool")
image = os.path.join( matplotlib.rcParams['datapath'],'images','matplotlib.png' )
win.setWindowIcon(QtGui.QIcon( image ))
tool = SubplotToolQt(self.canvas.figure, win)
win.setCentralWidget(tool)
win.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
win.show()
def _get_canvas(self, fig):
return FigureCanvasQT(fig)
def save_figure( self ):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
default_filetype = self.canvas.get_default_filetype()
start = "image." + default_filetype
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
fname = QtGui.QFileDialog.getSaveFileName(
self, "Choose a filename to save to", start, filters, selectedFilter)
if fname:
try:
self.canvas.print_figure( unicode(fname) )
except Exception, e:
QtGui.QMessageBox.critical(
self, "Error saving file", str(e),
QtGui.QMessageBox.Ok, QtGui.QMessageBox.NoButton)
class SubplotToolQt( SubplotTool, QtGui.QWidget ):
def __init__(self, targetfig, parent):
QtGui.QWidget.__init__(self, None)
self.targetfig = targetfig
self.parent = parent
self.sliderleft = QtGui.QSlider(QtCore.Qt.Horizontal)
self.sliderbottom = QtGui.QSlider(QtCore.Qt.Vertical)
self.sliderright = QtGui.QSlider(QtCore.Qt.Horizontal)
self.slidertop = QtGui.QSlider(QtCore.Qt.Vertical)
self.sliderwspace = QtGui.QSlider(QtCore.Qt.Horizontal)
self.sliderhspace = QtGui.QSlider(QtCore.Qt.Vertical)
# constraints
QtCore.QObject.connect( self.sliderleft,
QtCore.SIGNAL( "valueChanged(int)" ),
self.sliderright.setMinimum )
QtCore.QObject.connect( self.sliderright,
QtCore.SIGNAL( "valueChanged(int)" ),
self.sliderleft.setMaximum )
QtCore.QObject.connect( self.sliderbottom,
QtCore.SIGNAL( "valueChanged(int)" ),
self.slidertop.setMinimum )
QtCore.QObject.connect( self.slidertop,
QtCore.SIGNAL( "valueChanged(int)" ),
self.sliderbottom.setMaximum )
sliders = (self.sliderleft, self.sliderbottom, self.sliderright,
self.slidertop, self.sliderwspace, self.sliderhspace, )
adjustments = ('left:', 'bottom:', 'right:', 'top:', 'wspace:', 'hspace:')
for slider, adjustment in zip(sliders, adjustments):
slider.setMinimum(0)
slider.setMaximum(1000)
slider.setSingleStep(5)
layout = QtGui.QGridLayout()
leftlabel = QtGui.QLabel('left')
layout.addWidget(leftlabel, 2, 0)
layout.addWidget(self.sliderleft, 2, 1)
toplabel = QtGui.QLabel('top')
layout.addWidget(toplabel, 0, 2)
layout.addWidget(self.slidertop, 1, 2)
layout.setAlignment(self.slidertop, QtCore.Qt.AlignHCenter)
bottomlabel = QtGui.QLabel('bottom')
layout.addWidget(QtGui.QLabel('bottom'), 4, 2)
layout.addWidget(self.sliderbottom, 3, 2)
layout.setAlignment(self.sliderbottom, QtCore.Qt.AlignHCenter)
rightlabel = QtGui.QLabel('right')
layout.addWidget(rightlabel, 2, 4)
layout.addWidget(self.sliderright, 2, 3)
hspacelabel = QtGui.QLabel('hspace')
layout.addWidget(hspacelabel, 0, 6)
layout.setAlignment(hspacelabel, QtCore.Qt.AlignHCenter)
layout.addWidget(self.sliderhspace, 1, 6)
layout.setAlignment(self.sliderhspace, QtCore.Qt.AlignHCenter)
wspacelabel = QtGui.QLabel('wspace')
layout.addWidget(wspacelabel, 4, 6)
layout.setAlignment(wspacelabel, QtCore.Qt.AlignHCenter)
layout.addWidget(self.sliderwspace, 3, 6)
layout.setAlignment(self.sliderwspace, QtCore.Qt.AlignBottom)
layout.setRowStretch(1,1)
layout.setRowStretch(3,1)
layout.setColumnStretch(1,1)
layout.setColumnStretch(3,1)
layout.setColumnStretch(6,1)
self.setLayout(layout)
self.sliderleft.setSliderPosition(int(targetfig.subplotpars.left*1000))
self.sliderbottom.setSliderPosition(\
int(targetfig.subplotpars.bottom*1000))
self.sliderright.setSliderPosition(\
int(targetfig.subplotpars.right*1000))
self.slidertop.setSliderPosition(int(targetfig.subplotpars.top*1000))
self.sliderwspace.setSliderPosition(\
int(targetfig.subplotpars.wspace*1000))
self.sliderhspace.setSliderPosition(\
int(targetfig.subplotpars.hspace*1000))
QtCore.QObject.connect( self.sliderleft,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcleft )
QtCore.QObject.connect( self.sliderbottom,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcbottom )
QtCore.QObject.connect( self.sliderright,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcright )
QtCore.QObject.connect( self.slidertop,
QtCore.SIGNAL( "valueChanged(int)" ),
self.functop )
QtCore.QObject.connect( self.sliderwspace,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcwspace )
QtCore.QObject.connect( self.sliderhspace,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funchspace )
def funcleft(self, val):
if val == self.sliderright.value():
val -= 1
self.targetfig.subplots_adjust(left=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funcright(self, val):
if val == self.sliderleft.value():
val += 1
self.targetfig.subplots_adjust(right=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funcbottom(self, val):
if val == self.slidertop.value():
val -= 1
self.targetfig.subplots_adjust(bottom=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def functop(self, val):
if val == self.sliderbottom.value():
val += 1
self.targetfig.subplots_adjust(top=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funcwspace(self, val):
self.targetfig.subplots_adjust(wspace=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funchspace(self, val):
self.targetfig.subplots_adjust(hspace=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def error_msg_qt( msg, parent=None ):
if not is_string_like( msg ):
msg = ','.join( map( str,msg ) )
QtGui.QMessageBox.warning( None, "Matplotlib", msg, QtGui.QMessageBox.Ok )
def exception_handler( type, value, tb ):
"""Handle uncaught exceptions
It does not catch SystemExit
"""
msg = ''
# get the filename attribute if available (for IOError)
if hasattr(value, 'filename') and value.filename != None:
msg = value.filename + ': '
if hasattr(value, 'strerror') and value.strerror != None:
msg += value.strerror
else:
msg += str(value)
if len( msg ) : error_msg_qt( msg )
FigureManager = FigureManagerQT
| agpl-3.0 |
arahuja/scikit-learn | sklearn/covariance/tests/test_covariance.py | 142 | 11068 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
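    # centering manually makes assume_centered=True a valid choice below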
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
jat255/hyperspy | examples/data_navigation/2D_image_stack.py | 12 | 1130 | """Creates a 4D image and plots it
"""
import numpy as np
import hyperspy.api as hs
import matplotlib.pyplot as plt
# Create a 2D image stack with random data
im = hs.signals.Signal2D(np.random.random((16, 16, 32, 32)))
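# the first two dimensions (16 x 16) become navigation axes, the last two (32 x 32) the signal axes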
# Define the axis properties
im.axes_manager.signal_axes[0].name = ''
im.axes_manager.signal_axes[0].units = '1/nm'
im.axes_manager.signal_axes[0].scale = 0.1
im.axes_manager.signal_axes[0].offset = 0
im.axes_manager.signal_axes[1].name = ''
im.axes_manager.signal_axes[1].units = '1/nm'
im.axes_manager.signal_axes[1].scale = 0.1
im.axes_manager.signal_axes[1].offset = 0
im.axes_manager.navigation_axes[0].name = 'X'
im.axes_manager.navigation_axes[0].units = 'nm'
im.axes_manager.navigation_axes[0].scale = 0.3
im.axes_manager.navigation_axes[0].offset = 100
im.axes_manager.navigation_axes[1].name = 'Y'
im.axes_manager.navigation_axes[1].units = 'nm'
im.axes_manager.navigation_axes[1].scale = 0.3
im.axes_manager.navigation_axes[1].offset = 100
# Give a title
im.metadata.General.title = 'Random 2D image stack'
im.plot()
plt.show()  # Not necessary when running in HyperSpy's IPython profile
| gpl-3.0 |
davidpvilaca/TEP | aula7/tarefa3.py | 1 | 1970 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 14 22:53:33 2017
@author: davidpvilaca
"""
#import matplotlib.pyplot as plt
import numpy as np
import cv2
def track(image):
'''Accepts BGR image as Numpy array
Returns: (x,y) coordinates of centroid if found
(-1,-1) if no centroid was found
None if user hit ESC
'''
# Blur the image to reduce noise
blur = cv2.GaussianBlur(image, (5,5),0)
# Convert BGR to HSV
hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
# Threshold the HSV image for only green colors
lower_green = np.array([40,180,100])
upper_green = np.array([60,230,180])
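    # (OpenCV stores hue in 0-179, so hue 40-60 corresponds to green tones)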
# Threshold the HSV image to get only green colors
mask = cv2.inRange(hsv, lower_green, upper_green)
# Blur the mask
bmask = cv2.GaussianBlur(mask, (5,5),0)
# Take the moments to get the centroid
moments = cv2.moments(bmask)
m00 = moments['m00']
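    # m00 is the zeroth moment (mask area); the centroid is (m10/m00, m01/m00)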
centroid_x, centroid_y = None, None
if m00 != 0:
centroid_x = int(moments['m10']/m00)
centroid_y = int(moments['m01']/m00)
# Assume no centroid
ctr = (-1,-1)
# Use centroid if it exists
    if centroid_x is not None and centroid_y is not None:
ctr = (centroid_x, centroid_y)
# Put black circle in at centroid in image
cv2.circle(image, ctr, 4, (0,0,0))
# Display full-color image
cv2.imshow('Bola Verde', image)
# Force image display, setting centroid to None on ESC key input
if cv2.waitKey(1) & 0xFF == 27:
ctr = None
# Return coordinates of centroid
return ctr
def main():
cap = cv2.VideoCapture('Tracking.mp4')
ret, frame = cap.read()
while(ret):
if not track(frame):
break
if cv2.waitKey(1) & 0xFF == 27:
break
ret, frame = cap.read()
cap.release()
cv2.destroyAllWindows()
return 0
if __name__ == '__main__':
main()
| mit |
rinman24/ucsd_ch | coimbra_chamber/tests/utility/plot/plt_util_acceptance_test.py | 1 | 8955 | """Unit test suite for plot utility."""
import dataclasses
import dacite
import pytest
from coimbra_chamber.utility.plot.contracts import (
Axis,
DataSeries,
Layout,
Plot)
from coimbra_chamber.utility.plot.service import PlotUtility
# ----------------------------------------------------------------------------
# Fixtures
@pytest.fixture(scope='module')
def plt_util():
"""Create a module level instance of the plotting utility."""
return PlotUtility()
@pytest.fixture(scope='function')
def time():
"""Create a common time axis."""
data = dict(values=list(range(10)))
return dacite.from_dict(DataSeries, data)
@pytest.fixture(scope='function')
def position_1():
"""Create position of car 1."""
data = dict(
values=[x**2 for x in range(10)],
label='car 1')
return dacite.from_dict(DataSeries, data)
@pytest.fixture(scope='function')
def position_2():
"""Create position of car 2."""
data = dict(
values=[x**2.1 for x in range(10)],
label='car 2')
return dacite.from_dict(DataSeries, data)
@pytest.fixture(scope='function')
def one_car_position_axis(position_1):
"""Create a one car position in time axis."""
data = dict(
data=[position_1],
y_label='position')
return dacite.from_dict(Axis, data)
@pytest.fixture(scope='function')
def one_car_position_plot(time, one_car_position_axis):
"""Create a one car position in time plot."""
data = dict(
abscissa=time,
axes=[one_car_position_axis],
x_label='time')
return dacite.from_dict(Plot, data)
@pytest.fixture(scope='function')
def two_car_position_axis(position_1, position_2):
"""Create a two car position in time axis."""
data = dict(
data=[position_1, position_2],
y_label='position')
return dacite.from_dict(Axis, data)
@pytest.fixture(scope='function')
def two_car_position_plot(time, two_car_position_axis):
"""Create a two car position in time plot."""
data = dict(
abscissa=time,
axes=[two_car_position_axis],
x_label='time')
return dacite.from_dict(Plot, data)
@pytest.fixture(scope='function')
def velocity_1():
"""Create velocity of car 1."""
data = dict(
values=[2 * x for x in range(10)],
sigma=[5]*10,
label='car 1')
return dacite.from_dict(DataSeries, data)
@pytest.fixture(scope='function')
def velocity_2():
"""Create velocity of car 1."""
data = dict(
values=[2.1 * x for x in range(10)],
sigma=[5]*10,
label='car 2')
return dacite.from_dict(DataSeries, data)
@pytest.fixture(scope='function')
def two_car_velocity_axis(velocity_1, velocity_2):
"""Create a two car velocity in time axis."""
data = dict(
data=[velocity_1, velocity_2],
y_label='velocity')
return dacite.from_dict(Axis, data)
@pytest.fixture(scope='function')
def two_car_velocity_plot(time, two_car_velocity_axis):
"""Create a two car velocity in time plot."""
data = dict(
abscissa=time,
axes=[two_car_velocity_axis],
x_label='time')
return dacite.from_dict(Plot, data)
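# Hedged sketch (added; not one of the original fixtures): the same contract
# nesting the fixtures above build piecewise, written as a single dictionary.
# dacite resolves the nested dicts into DataSeries -> Axis -> Plot -> Layout;
# all values below are made up for illustration.
def _layout_sketch():
    data = dict(
        plots=[dict(
            abscissa=dict(values=list(range(3))),
            axes=[dict(
                data=[dict(values=[x ** 2 for x in range(3)], label='car 1')],
                y_label='position')],
            x_label='time')],
        style='seaborn-darkgrid')
    return dacite.from_dict(Layout, data)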
# ----------------------------------------------------------------------------
# Acceptance tests
@pytest.mark.parametrize('style', ['seaborn-darkgrid', '', 'dark_background'])
def test_can_set_global_style(style, plt_util, one_car_position_plot): # noqa: D103
# Arrange ----------------------------------------------------------------
data = dict(plots=[one_car_position_plot], style=style)
layout = dacite.from_dict(Layout, data)
# Act --------------------------------------------------------------------
plt_util.plot(layout)
@pytest.mark.parametrize('error_type', ['', 'continuous'])
def test_can_plot_ordinate_errorbars(
plt_util, position_1, one_car_position_axis, one_car_position_plot,
error_type): # noqa: D103
# Arrange ----------------------------------------------------------------
# DataSeries
changes = dict(sigma=[5]*10)
position_1 = dataclasses.replace(position_1, **changes)
# Axis
changes = dict(data=[position_1], error_type=error_type)
one_car_position_axis = dataclasses.replace(one_car_position_axis, **changes)
# Plot
changes = dict(axes=[one_car_position_axis])
one_car_position_plot = dataclasses.replace(one_car_position_plot, **changes)
# Create the layout
data = dict(plots=[one_car_position_plot])
layout = dacite.from_dict(Layout, data)
# Act --------------------------------------------------------------------
plt_util.plot(layout)
@pytest.mark.parametrize('error_type', ['', 'continuous'])
def test_can_plot_abscissa_errorbars(
plt_util, time, one_car_position_axis, one_car_position_plot,
error_type): # noqa: D103
# Arrange ----------------------------------------------------------------
# DataSeries
changes = dict(sigma=[0.5]*10)
time = dataclasses.replace(time, **changes)
# Axis
changes = dict(error_type=error_type)
one_car_position_axis = dataclasses.replace(one_car_position_axis, **changes)
# Plot
changes = dict(abscissa=time, axes=[one_car_position_axis])
one_car_position_plot = dataclasses.replace(one_car_position_plot, **changes)
# Create the layout
data = dict(plots=[one_car_position_plot])
layout = dacite.from_dict(Layout, data)
# Act --------------------------------------------------------------------
plt_util.plot(layout)
@pytest.mark.parametrize('error_type', ['', 'continuous'])
def test_can_plot_abscissa_and_ordinate_errorbars(
plt_util, time, position_1, one_car_position_axis,
one_car_position_plot, error_type): # noqa: D103
# Arrange ----------------------------------------------------------------
# DataSeries
changes = dict(sigma=[0.5]*10)
time = dataclasses.replace(time, **changes)
changes = dict(sigma=[5]*10)
position_1 = dataclasses.replace(position_1, **changes)
# Axis
changes = dict(data=[position_1], error_type=error_type)
one_car_position_axis = dataclasses.replace(one_car_position_axis, **changes)
# Plot
changes = dict(abscissa=time, axes=[one_car_position_axis])
one_car_position_plot = dataclasses.replace(one_car_position_plot, **changes)
# Create the layout
data = dict(plots=[one_car_position_plot])
layout = dacite.from_dict(Layout, data)
# Act --------------------------------------------------------------------
plt_util.plot(layout)
def test_can_plot_multiple_plots_on_one_axis(plt_util, two_car_position_plot): # noqa: D103
# Arrange ----------------------------------------------------------------
data = dict(plots=[two_car_position_plot])
layout = dacite.from_dict(Layout, data)
# Act --------------------------------------------------------------------
plt_util.plot(layout)
def test_layout_length_2(
plt_util, two_car_position_plot, two_car_velocity_plot): # noqa: D103
# Arrange ----------------------------------------------------------------
data = dict(
plots=[two_car_position_plot, two_car_velocity_plot],
style='seaborn-darkgrid')
layout = dacite.from_dict(Layout, data)
# Act --------------------------------------------------------------------
plt_util.plot(layout)
def test_layout_length_3(
plt_util, two_car_position_plot, two_car_velocity_plot): # noqa: D103
# Arrange ----------------------------------------------------------------
data = dict(
plots=[two_car_position_plot, two_car_velocity_plot,
two_car_position_plot],
style='seaborn-deep')
layout = dacite.from_dict(Layout, data)
# Act --------------------------------------------------------------------
plt_util.plot(layout)
def test_layout_length_4(
plt_util, two_car_position_plot, two_car_velocity_plot): # noqa: D103
# Arrange ----------------------------------------------------------------
data = dict(
plots=[two_car_position_plot, two_car_velocity_plot,
two_car_position_plot, two_car_velocity_plot],
style='grayscale')
layout = dacite.from_dict(Layout, data)
# Act --------------------------------------------------------------------
plt_util.plot(layout)
def test_can_plot_two_y_axis_on_single_plot(
plt_util, two_car_position_axis, two_car_velocity_axis,
two_car_position_plot): # noqa: D103
# Arrange ----------------------------------------------------------------
# Plot
changes = dict(axes=[two_car_position_axis, two_car_velocity_axis])
plot = dataclasses.replace(two_car_position_plot, **changes)
data = dict(plots=[plot])
layout = dacite.from_dict(Layout, data)
# Act --------------------------------------------------------------------
plt_util.plot(layout)
| mit |
tosolveit/scikit-learn | examples/model_selection/randomized_search.py | 201 | 3214 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The result in parameter settings is quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sklearn/tests/test_cross_validation.py | 2 | 14355 | """Test the cross_validation module"""
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from nose.tools import assert_true, assert_equal
from nose.tools import assert_raises
from sklearn.utils.testing import assert_greater, assert_less
from sklearn.utils.fixes import unique
from sklearn import cross_validation as cval
from sklearn.base import BaseEstimator
from sklearn.datasets import make_regression
from sklearn.datasets import load_iris
from sklearn.metrics import zero_one_score
from sklearn.metrics import f1_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.metrics import explained_variance_score
from sklearn.svm import SVC
from sklearn.linear_model import Ridge
from sklearn.svm.sparse import SVC as SparseSVC
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
class MockClassifier(BaseEstimator):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0):
self.a = a
def fit(self, X, Y):
return self
def predict(self, T):
return T.shape[0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
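# Note (added for clarity): MockClassifier exposes only the minimal estimator
# surface -- fit(X, Y), predict(T) and score(X, Y) -- so the cross-validation
# helpers below can be exercised without fitting a real model.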
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.arange(10) / 2
##############################################################################
# Tests
def test_kfold():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
y = [0, 0, 1, 1, 2]
assert_raises(ValueError, cval.StratifiedKFold, y, 3)
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
all_folds = None
for train, test in kf:
if all_folds is None:
all_folds = test.copy()
else:
all_folds = np.concatenate((all_folds, test))
all_folds.sort()
assert_array_equal(all_folds, np.arange(300))
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf1 = cval.KFold(300, 3, shuffle=True, random_state=0, indices=True)
kf2 = cval.KFold(300, 3, shuffle=True, random_state=0, indices=False)
ind = np.arange(300)
for kf in (kf1, kf2):
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert np.any(sorted_array != ind[train])
sorted_array = np.arange(101, 200)
assert np.any(sorted_array != ind[train])
sorted_array = np.arange(201, 300)
assert np.any(sorted_array != ind[train])
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
ss4 = cval.ShuffleSplit(10, test_size=long(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
ys = [
np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0, indices=True)
for train, test in sss:
assert_array_equal(unique(y[train]), unique(y[test]))
# Checks if folds keep classes proportions
p_train = np.bincount(
unique(y[train], return_inverse=True)[1]
) / float(len(y[train]))
p_test = np.bincount(
unique(y[test], return_inverse=True)[1]
) / float(len(y[test]))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3),
train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=0.6, train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=2, train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_shuffle_split_warnings():
expected_message = ("test_fraction is deprecated in 0.11 and scheduled "
"for removal in 0.13, use test_size instead",
"train_fraction is deprecated in 0.11 and scheduled "
"for removal in 0.13, use train_size instead")
with warnings.catch_warnings(record=True) as warn_queue:
cval.ShuffleSplit(10, 3, test_fraction=0.1)
cval.ShuffleSplit(10, 3, train_fraction=0.1)
cval.train_test_split(range(3), test_fraction=0.1)
cval.train_test_split(range(3), train_fraction=0.1)
assert_equal(len(warn_queue), 4)
assert_equal(str(warn_queue[0].message), expected_message[0])
assert_equal(str(warn_queue[1].message), expected_message[1])
assert_equal(str(warn_queue[2].message), expected_message[0])
assert_equal(str(warn_queue[3].message), expected_message[1])
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = range(10)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = \
cval.train_test_split(X, X_s, y)
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [1., 0.97, 0.90, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
score_func=zero_one_score, cv=5)
assert_array_almost_equal(zo_scores, [1., 0.97, 0.90, 0.97, 1.], 2)
# F1 score (class are balanced so f1_score should be equal to zero/one
# score
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
score_func=f1_score, cv=5)
assert_array_almost_equal(f1_scores, [1., 0.97, 0.90, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, score_func=r2_score, cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
score_func=mean_squared_error)
expected_mse = np.array([763.07, 553.16, 274.38, 273.26, 1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
ev_scores = cval.cross_val_score(reg, X, y, cv=5,
score_func=explained_variance_score)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, zero_one_score, cv)
assert_greater(score, 0.9)
np.testing.assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, zero_one_score, cv, labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SparseSVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2, indices=True)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, zero_one_score, cv_sparse,
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(svm, X, y,
zero_one_score, cv)
assert_less(score, 0.5)
assert_greater(pvalue, 0.4)
def test_cross_val_generator_with_mask():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4, indices=False)
lpo = cval.LeavePOut(4, 2, indices=False)
kf = cval.KFold(4, 2, indices=False)
skf = cval.StratifiedKFold(y, 2, indices=False)
lolo = cval.LeaveOneLabelOut(labels, indices=False)
lopo = cval.LeavePLabelOut(labels, 2, indices=False)
ss = cval.ShuffleSplit(4, indices=False)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss]:
for train, test in cv:
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4, indices=True)
lpo = cval.LeavePOut(4, 2, indices=True)
kf = cval.KFold(4, 2, indices=True)
skf = cval.StratifiedKFold(y, 2, indices=True)
lolo = cval.LeaveOneLabelOut(labels, indices=True)
lopo = cval.LeavePLabelOut(labels, 2, indices=True)
b = cval.Bootstrap(2) # only in index mode
ss = cval.ShuffleSplit(2, indices=True)
for cv in [loo, lpo, kf, skf, lolo, lopo, b, ss]:
for train, test in cv:
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
def test_bootstrap_errors():
assert_raises(ValueError, cval.Bootstrap, 10, train_size=100)
assert_raises(ValueError, cval.Bootstrap, 10, test_size=100)
assert_raises(ValueError, cval.Bootstrap, 10, train_size=1.1)
assert_raises(ValueError, cval.Bootstrap, 10, test_size=1.1)
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8,
train_size=3)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_cross_indices_exception():
X = coo_matrix(np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4, indices=False)
lpo = cval.LeavePOut(4, 2, indices=False)
kf = cval.KFold(4, 2, indices=False)
skf = cval.StratifiedKFold(y, 2, indices=False)
lolo = cval.LeaveOneLabelOut(labels, indices=False)
lopo = cval.LeavePLabelOut(labels, 2, indices=False)
assert_raises(ValueError, cval.check_cv, loo, X, y)
assert_raises(ValueError, cval.check_cv, lpo, X, y)
assert_raises(ValueError, cval.check_cv, kf, X, y)
assert_raises(ValueError, cval.check_cv, skf, X, y)
assert_raises(ValueError, cval.check_cv, lolo, X, y)
assert_raises(ValueError, cval.check_cv, lopo, X, y)
| agpl-3.0 |
timqian/neurons | neurons/learning.py | 2 | 6346 | """
This module contains classes that can learn the weights.
"""
import numpy as np
class STDP:
"""
Spike Timing Dependent Plasticity
"""
def __init__(self, eta, w_in, w_out, tau, window_size, verbose=False, tau2=None):
"""
:param eta: learning rate
:param w_in:
:param w_out:
:param tau: The tau parameter for the learning window. If you want an unsymmetric window, then also set tau2.
:param window_size:
:param verbose: Verbose output of the weight change.
:param tau2: If learning window is unsymmetric, then tau2 is the tau parameter for x-values GREATER than 0. If not given, it defaults to tau.
:return:
"""
self.eta = eta
self.w_in = w_in
self.w_out = w_out
self.tau = tau
self.tau2 = tau2 if tau2 else tau
self.window_size = window_size # T_l
self.verbose = verbose
def learning_window_neuron_pre(self, t1, t2_list):
"""
Return the sum of the learning windows of one neuron.
:param t1: current time
:param t2_list: spiking times of neuron
"""
sum_result = 0
for t2 in t2_list:
sum_result += self.learning_window(t2 - t1)
return sum_result
def learning_window_neuron_post(self, t1, t2_list):
"""
Return the sum of the learning windows of one neuron.
:param t1: current time
:param t2_list: spiking times of neuron
"""
sum_result = 0
for t2 in t2_list:
sum_result += self.learning_window(t1 - t2)
return sum_result
def learning_window(self, x):
"""
Constant Learning Window
:param x:
:return:
"""
if x > 0:
return - np.exp(-x / self.tau2)
elif x < 0:
return np.exp(x / self.tau)
else:
return 0
def weight_change(self, spikes, weights, t):
"""
Calculate the weight change at time t. Changes the weights in place.
:param spikes: Spiketrain
:param weights: current weights
:return: Changes in weights
"""
if weights.dtype != 'float':
raise ValueError('The weight matrix has to be a float array. (Try to create it with dtype=float)')
# Trim spiketrain, so that it's 'windowed' (look at variable T_l in the script)
spikes = spikes[:, max(0, t+1-self.window_size):t+1]
if not spikes.any():
if self.verbose:
print("--------------------------")
print("Calculating STDP weight change at time")
print("No spikes found")
return np.zeros(weights.shape)
neurons, current_time = spikes.shape
current_time -= 1 # because index begins with 0
connected_neurons = np.array(weights, dtype=bool)
last_spikes = spikes[:, -1]
last_spikes = last_spikes[:, np.newaxis]
# Calculate the weight change for presynaptic spikes
weight_change_presynaptic = last_spikes * connected_neurons * self.w_in
# Calculate the weight change for postsynaptic spikes
weight_change_postsynaptic = last_spikes.T * connected_neurons * self.w_out
# Calculate the weight changes in regards of the learning window
spikes_time = []
for neuron in range(neurons):
spikes_time.append([])
for time, spike in enumerate(spikes[neuron, :]):
if spike:
spikes_time[neuron].append(time)
neuron_learnwindow_pre = [self.learning_window_neuron_pre(current_time, x) for x in spikes_time]
neuron_learnwindow_pre = np.array(neuron_learnwindow_pre, ndmin=2).T # Make it a column-vector
neuron_learnwindow_post = [self.learning_window_neuron_post(current_time, x) for x in spikes_time]
neuron_learnwindow_post = np.array(neuron_learnwindow_post, ndmin=2).T # Make it a column-vector
learning_window_presynaptic = (last_spikes.T * connected_neurons) * neuron_learnwindow_pre
learning_window_postsynaptic = (last_spikes * connected_neurons) * neuron_learnwindow_post.T
# Total weight change
weight_change = self.eta * (weight_change_presynaptic + weight_change_postsynaptic + learning_window_presynaptic
+ learning_window_postsynaptic)
# Change the weight in place
weights = weights.__iadd__(weight_change)
if self.verbose:
print("--------------------------")
print("Calculating STDP weight change at time")
print("Last spikes", last_spikes)
print("Weight change in:", weight_change_presynaptic)
print("Weight change out:", weight_change_postsynaptic)
print("Outgoing spikes time", spikes_time)
print("Neuron learnwindow pre", neuron_learnwindow_pre)
print("Neuron learnwindow post", neuron_learnwindow_post)
print("Presyncpit:", learning_window_presynaptic)
print("Postsynapitc:", learning_window_postsynaptic)
print("Summe (pres): ", neuron_learnwindow_pre, neuron_learnwindow_pre.shape)
print("Summe (post): ", neuron_learnwindow_post, neuron_learnwindow_post.shape)
print("presynaptic learning window", learning_window_presynaptic)
print("postsynaptic learning window", learning_window_postsynaptic)
print("type of weight change:", type(weight_change))
print("updated weights (function):", weights)
print("")
return weight_change
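# Hedged usage sketch (added; not part of the original module): run the STDP
# rule over every time step of a spike train, rather than the single step
# shown in __main__ below. The spike train and weight matrix are made up;
# only the STDP.weight_change() API defined above is used, and it updates
# `weights` in place.
def _stdp_usage_sketch():
    spikes = np.array([[0, 1, 0, 0, 1],
                       [0, 0, 1, 0, 0],
                       [1, 0, 0, 1, 0]], dtype=bool)
    weights = np.array([[0, 1, 1],
                        [0, 0, 1],
                        [0, 0, 0]], dtype=float)
    model = STDP(eta=0.05, w_in=0.5, w_out=0.5, tau=10.0, window_size=4)
    for t in range(spikes.shape[1]):
        model.weight_change(spikes, weights, t)
    return weights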
if __name__ == "__main__":
s = np.array([[0, 0, 1, 1, 1],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 1]], dtype=bool)
w = np.array([[0, 1, 1],
[0, 0, 1],
[0, 0, 0]], dtype=float)
print("Spike Train", s)
print("Weights", w)
learning_model = STDP(eta=0.05, w_in=0.5, w_out=0.5, tau=10.0, window_size=4, verbose=True)
print("Weight change: ", learning_model.weight_change(s, w, 2))
print("updated weights", w)
import matplotlib.pyplot as plt
x = np.linspace(-15, 15, 1000)
y = np.array([learning_model.learning_window(xv) for xv in x])
plt.plot(x,y)
plt.show() | bsd-2-clause |
JohnUrban/fast5tools | fast5tools/plotops.py | 1 | 12693 | import sys
from collections import defaultdict
# Plotting
import matplotlib
## may need following line for remote jobs (e.g. submitting batch scripts)
## matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
## Fast5Tools
from fast5tools.helperops import *
## 2018-04-20
## Some of the contents derive from old code imported from my poreminion tools
## Needs to be tested, cleaned up, re-done
def qualhist(quals, filename=None, minrange=0, maxrange=20, step=1, density=False, cumulative=False, text_only=True):
## TODO: histtype : bar, barstacked, step, stepfilled;; orientation : horizontal, vertical;;
## TODO: rwidth, color,
if quals:
bins = range(minrange, maxrange, step)
ylab = 'Density' if density else 'Frequency'
ylab += ' (cumulative)' if cumulative else ''
n, outbins, patches = plt.hist(x=quals, bins=bins, density=density, cumulative=cumulative)
plt.xlabel("Base Quality")
plt.ylabel(ylab)
plt.xticks(rotation=65, fontsize=8)
    else:
        sys.stderr.write("No reads that meet criteria...\n")
        return
if text_only:
hist_as_txt = '\n'.join([str(k)+'\t'+str(v) for k,v in zip(bins, n)]).strip()
if filename is not None:
with open(filename, 'w') as txtout:
txtout.write(hist_as_txt)
else:
sys.stdout.write(hist_as_txt)
else:
if filename is not None:
try:
plt.savefig(filename)
plt.close()
except:
sys.stderr.write("Unrecognized extension for %s!\nTry .pdf or .jpg or .png \n" % (plot_file))
else:
plt.show()
def update_qualpos(quals, qualpos, bin_width=1000, zscores=False, robust=False):
    ''' Returns a dictionary with keys = bin number along the read and
    values = the list of quality scores falling in that bin.
    quals = iterable of per-base quality scores for one read
    qualpos = defaultdict(list) to update (pass an empty one to start)
    bin_width = bin size in bases; zscores/robust store (robust) z-scores instead of raw scores
    '''
if zscores or robust:
if robust:
qmean = np.median(quals)
qSD = np.median( abs( np.array(quals) - qmean ) )
else:
qmean = np.mean(quals)
qSD = np.std(quals)
ctr = 0
for q in quals:
ctr += 1
if zscores or robust:
qualpos[1+int(ctr//bin_width)].append((q-qmean)/qSD) ## Extra 1 is to put in 1-based pos
else:
qualpos[1+int(ctr//bin_width)].append(q) ## This is collecting information for bins default 1 kb
return qualpos
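# Hedged usage sketch (added; not from the original module): pool the base
# qualities of two made-up reads into 5-bp bins. The returned defaultdict maps
# bin numbers to the quality scores landing in that bin, which is the
# structure qualposplot() below expects.
def _qualpos_sketch():
    qualpos = defaultdict(list)
    fake_reads = [[10, 12, 9, 30, 28, 31, 15], [8, 11, 10, 29, 27]]
    for read_quals in fake_reads:
        qualpos = update_qualpos(read_quals, qualpos, bin_width=5)
    return qualpos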
def qualposplot(qualpos, bin_width, zscores=False, robust=False, filename=None):
if zscores:
ylab = "Quality Z-score"
plotout = "qualZscore-Vs-pos"
elif robust:
ylab = "Robust Quality Z-score"
plotout = "robustQualZscore-Vs-pos"
else:
ylab = "Quality score"
plotout = "qual-Vs-pos"
if qualpos.keys():
data = [qualpos[e] for e in sorted(qualpos.keys())]
plt.boxplot(data)
xdetail = " (" + str(bin_width) + " bp bins)"
plt.xlabel("Bin number in read" + xdetail)
plt.ylabel(ylab)
plt.xticks(rotation=65, fontsize=8)
else:
sys.stderr.write("No reads that meet criteria: cannot construct quality-Zscore vs. position scatter plot...\n")
if filename is not None:
try:
plt.savefig(filename)
plt.close()
except:
sys.stderr.write("Unrecognized extension for %s!\nTry .pdf or .jpg or .png \n" % (plot_file))
else:
plt.show()
################################################################
#####
################################################################
def choose_kmer_plot(kmerdict=False, refdict=False, gg=False):
if kmerdict and not refdict:
if not gg:
singleTableKmerPlot(kmerdict)
else:
singleTablePlot_gg(parser, args)
elif kmerdict and refdict:
twoTableKmerScatterPlot(kmerdict, refdict)
def general_barplot(x, height, width=1.0, edgecolor='k', align='center', saveas=False):
# For some, use: align='edge'
plt.bar(x=x, height=height, width=width, edgecolor=edgecolor)
if saveas:
plt.savefig(saveas)
else:
plt.show()
plt.close()
def singleTableKmerHist(kmercounts, density=False, cumulative=False, saveas=False):
    ''' Plot a histogram of the kmer count distribution.
    kmercounts is either a dict of counts (e.g. a defaultdict(int)) or a
    path to a kmer count table readable by kmercount_in_table().'''
kmerdict = kmercounts if isinstance(kmercounts, dict) else kmercount_in_table(kmercounts)
numKmers = len(kmerdict)
data = kmerDictToPlotData(kmerdict)
n, outbins, patches = plt.hist(x=data['counts'], density=density, cumulative=cumulative)
if saveas:
plt.savefig(saveas)
else:
plt.show()
plt.close()
def singleTableKmerPlot(kmercounts, saveas=False):
    ''' Barplot of kmer counts, one bar per kmer.
    kmercounts is either a dict of counts (e.g. a defaultdict(int)) or a
    path to a kmer count table readable by kmercount_in_table().'''
kmerdict = kmercounts if isinstance(kmercounts, dict) else kmercount_in_table(kmercounts)
numKmers = len(kmerdict)
data = kmerDictToPlotData(kmerdict)
general_barplot(x=range(1,numKmers+1), height=data['counts'], width=1.0, saveas=saveas)
def twoTableKmerScatterPlot(kmercounts, refcounts, saveas=False):
    ''' Scatter plot of test kmer counts (y) against reference kmer counts (x).
    kmercounts and refcounts are dicts of counts or paths to kmer count
    tables; the two tables are read in and equilibrated first.'''
## read in and equilibrate the 2 kmer count tables
kmerdict, refdict = readInTwoKmerTables(kmercounts, refcounts)
## make approp data structures
test = kmerDictToPlotData(kmerdict)
reference = kmerDictToPlotData(refdict)
plt.scatter(x=reference['counts'], y=test['counts'], s=10, marker='.')
for i in range(len(test['kmers'])):
plt.annotate(test['kmers'][i], (reference['counts'][i],test['counts'][i]), size='xx-small')
plt.xlabel('Reference')
plt.ylabel('Test')
if saveas:
plt.savefig(saveas)
else:
plt.show()
plt.close()
def general_scatter(x, y, words=False, saveas=False, xlab="", ylab="", s=5, cex='xx-small', marker='.'):
plt.scatter(x=x, y=y, s=s, marker=marker)
if words:
for i in range(len(words)):
plt.annotate(words[i], (x[i], y[i]), size=cex)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.autoscale()
if saveas:
plt.savefig(saveas)
else:
plt.show()
plt.close()
def twoTableKmer_MA_Plot(medNormObj, base=2, saveas=False, s=5, cex='xx-small'):
x = medNormObj.get_logavg(base)
y = medNormObj.get_logfc(base)
k = medNormObj.get_genes()
xlab = 'Average Log' + str(base) + ' Counts'
ylab = 'Log' + str(base) + ' Fold Change'
general_scatter(x, y, k, saveas, xlab, ylab, s, cex)
def volcanoPlot(logfc, p, k, saveas=False, xlab="log2(Fold Change)", ylab="-log10(p-value)", s=5, cex='xx-small'):
'''
logfc is expected to be log fold-changes
p is expected to be p-values
'''
x = [e for e in logfc]
y = [-1*log10(e) for e in p]
#y = -1*logbase(p,base=10)
general_scatter(x, y, k, saveas, xlab, ylab, s, cex)
def smearPlot(logfc, logcpm, k, saveas=False, xlab="log2(CPM)", ylab="log2(Fold Change)", s=5, cex='xx-small'):
'''
logfc is expected to be log fold-changes
logcpm is expected to be log counts per million (I think)
smearplot in poreminion was x=logfc, y=logcpm.
But logcpm is the log average over both groups.
Thus logcpm vs logfc is essentially an MA plot.
So the old smear plot was just an MA plot forcing you to turn your head.
'''
x = [e for e in logcpm]
y = [e for e in logfc]
general_scatter(x, y, k, saveas, xlab, ylab, s, cex)
def alphabeticalPlot(y, k, saveas=False, xlab="kmer", ylab="log2(FC)", s=5, cex='xx-small'):
'''Assumes given x is in same order as k.
Example of y = logfc'''
x = range(len(k))
k, y = zip(*sorted(zip(k,y)))
general_scatter(x, y, k, saveas, xlab, ylab, s, cex)
def gcPlot(y, k, saveas=False, xlab="Percent GC", ylab="log2(FC)", s=5, cex='xx-small'):
'''Assumes given x is in same order as k.
Example of y = logfc'''
x = [gcbases(e) for e in k]
x, k, y = zip(*sorted(zip(x,k,y)))
general_scatter(x, y, k, saveas, xlab, ylab, s, cex)
def complexityPlot(y, k, saveas=False, xlab="Percent GC", ylab="log2(FC)", s=5, cex='xx-small'):
'''Assumes given x is in same order as k.
Example of y = logfc'''
x = [gcbases(e) for e in k]
x, k, y = zip(*sorted(zip(x,k,y)))
general_scatter(x, y, k, saveas, xlab, ylab, s, cex)
def compressionPlot(y, k, saveas=False, xlab="Compression Length", ylab="log2(FC)", s=5, cex='xx-small'):
'''Assumes given x is in same order as k.
Example of y = logfc'''
compress_lens = kmer_compression(k=len(k[0]))
x = [compress_lens[e] for e in k]
x, k, y = zip(*sorted(zip(x,k,y)))
general_scatter(x, y, k, saveas, xlab, ylab, s, cex)
def twoTableKmerScatterPlotEdgeR(edgeRobj,saveas=False, xlab="TMM Norm Reference Count", ylab="TMM Norm Test Count", s=5, cex='xx-small'):
nc = edgeRobj.get_normalized_counts()
x = list(nc[:,0])
y = list(nc[:,1])
k = list(edgeRobj.get_dge_list_genes())
#print x[:10], len(x)
#print y[:10], len(y)
#print k, len(k)
general_scatter(x, y, k, saveas, xlab, ylab, s, cex)
##### REPLACING SOON
def median_norm_analysis(kmercounts, refcounts, scale_to_ref=False, log_it=False, base=2):
'''This function will/should be replaced by the MedNorm class'''
## read in and equilibrate the 2 kmer count tables
kmerdict, refdict = readInTwoKmerTables(kmercounts, refcounts)
## make approp data structures
test = kmerDictToPlotData(kmerdict)
reference = kmerDictToPlotData(refdict)
## Scale Test to Reference
test_z, test_med, test_mad = median_normalize(test['counts'])
ref_z, ref_med, ref_mad = median_normalize(reference['counts'])
## Normalize to both ref_spread and ref_median -- this is analogous to comparing z-scores, so makes more sense to me
test_z_to_ref = (test_z*ref_mad) + ref_med
ref_z_to_ref = (ref_z*ref_mad) + ref_med
## Normalize to just ref_median, retaining test_spread
## test_z = (test_z*test_mad) + ref_med
## Can only take log of positive numbers.
## For now this assumes that scaling to the reference returns only positive numbers
## But this needs to be revisited as that assumption can be easily violated
    log_test_z_to_ref = logbase(test_z_to_ref, base)
    log_ref_z_to_ref = logbase(ref_z_to_ref, base)
## Return results - 3 tuples
return (test_z, test_med, test_mad), (ref_z, ref_med, ref_mad), (test_z_to_ref, ref_z_to_ref)
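# Hedged illustration (added; not from the original module): median_normalize()
# used above is imported rather than defined here, so this is a plain-numpy
# sketch of the same robust scaling idea on made-up counts. `mad` is the median
# absolute deviation, and re-scaling to a reference is (z * ref_mad) + ref_med,
# as in the function above.
def _median_scaling_sketch():
    counts = np.array([10., 12., 9., 40., 11.])
    med = np.median(counts)
    mad = np.median(np.abs(counts - med))
    z = (counts - med) / mad
    ref_med, ref_mad = 100.0, 8.0  # pretend reference statistics
    return z, (z * ref_mad) + ref_med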
def twoTableKmer_MA_Plot_(kmercounts, refcounts, saveas=False, scale_to_ref=False, logplot=False, base=2, s=5, cex='xx-small'):
    ''' MA-style comparison of test kmer counts against reference kmer counts.
    kmercounts and refcounts are dicts of counts or paths to kmer count
    tables; counts are median-normalized (and optionally scaled/logged) before plotting.'''
## read in and equilibrate the 2 kmer count tables
kmerdict, refdict = readInTwoKmerTables(kmercounts, refcounts)
## make approp data structures
test = kmerDictToPlotData(kmerdict)
reference = kmerDictToPlotData(refdict)
## Scale Test to Reference
test_z, test_med, test_mad = median_normalize(test['counts'])
ref_z, ref_med, ref_mad = median_normalize(reference['counts'])
## Optional: Scale Test to Reference
if scale_to_ref:
## Normalize to both ref_spread and ref_median -- this is analogous to comparing z-scores, so makes more sense to me
test_z = (test_z*ref_mad) + ref_med
ref_z = (ref_z*ref_mad) + ref_med
## Normalize to just ref_median, retaining test_spread
## test_z = (test_z*test_mad) + ref_med
## Optional: Log
if logplot:
test_z = logbase(test_z, base)
ref_z = logbase(ref_z, base)
## Get avg counts (x-axis)
avg_z = (test_z + ref_z) / 2.0
## Get difference (y-axis)
diffs = test_z - ref_z
## Handle axis labels
ylab = 'Difference: Test - Reference'
xlab = 'Average Counts'
if logplot:
ylab = 'Log'+str(base) + ' ' + ylab
xlab = 'Average Log' +str(base) + ' Counts'
if scale_to_ref:
ylab += '\n(scaled to reference)'
## Scatter
x = avg_z
y = diffs
k = test['kmers']
general_scatter(x, y, k, saveas, xlab, ylab, s, cex) ## formerly s=10, now s=5
| mit |
qifeigit/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/tests/plotting/test_datetimelike.py | 7 | 47670 | from datetime import datetime, timedelta, date, time
import nose
from pandas.compat import lrange, zip
import numpy as np
from pandas import Index, Series, DataFrame
from pandas.tseries.index import date_range, bdate_range
from pandas.tseries.offsets import DateOffset
from pandas.tseries.period import period_range, Period, PeriodIndex
from pandas.tseries.resample import DatetimeIndex
from pandas.util.testing import assert_series_equal, ensure_clean, slow
import pandas.util.testing as tm
from pandas.tests.plotting.common import (TestPlotBase,
_skip_if_no_scipy_gaussian_kde)
""" Test cases for time series specific (freq conversion, etc) """
@tm.mplskip
class TestTSPlot(TestPlotBase):
def setUp(self):
TestPlotBase.setUp(self)
freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q', 'A']
idx = [period_range('12/31/1999', freq=x, periods=100) for x in freq]
self.period_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.period_df = [DataFrame(np.random.randn(len(x), 3), index=x,
columns=['A', 'B', 'C'])
for x in idx]
freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q-DEC', 'A', '1B30Min']
idx = [date_range('12/31/1999', freq=x, periods=100) for x in freq]
self.datetime_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.datetime_df = [DataFrame(np.random.randn(len(x), 3), index=x,
columns=['A', 'B', 'C'])
for x in idx]
def tearDown(self):
tm.close()
@slow
def test_ts_plot_with_tz(self):
# GH2877
index = date_range('1/1/2011', periods=2, freq='H',
tz='Europe/Brussels')
ts = Series([188.5, 328.25], index=index)
_check_plot_works(ts.plot)
def test_fontsize_set_correctly(self):
# For issue #8765
import matplotlib.pyplot as plt # noqa
df = DataFrame(np.random.randn(10, 9), index=range(10))
ax = df.plot(fontsize=2)
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
self.assertEqual(label.get_fontsize(), 2)
@slow
def test_frame_inferred(self):
# inferred freq
import matplotlib.pyplot as plt # noqa
idx = date_range('1/1/1987', freq='MS', periods=100)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
# axes freq
idx = idx[0:40].union(idx[45:99])
df2 = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df2.plot)
# N > 1
idx = date_range('2008-1-1 00:15:00', freq='15T', periods=10)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
def test_is_error_nozeroindex(self):
# GH11858
i = np.array([1, 2, 3])
a = DataFrame(i, index=i)
_check_plot_works(a.plot, xerr=a)
_check_plot_works(a.plot, yerr=a)
def test_nonnumeric_exclude(self):
import matplotlib.pyplot as plt
idx = date_range('1/1/1987', freq='A', periods=3)
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]}, idx)
ax = df.plot() # it works
self.assertEqual(len(ax.get_lines()), 1) # B was plotted
plt.close(plt.gcf())
self.assertRaises(TypeError, df['A'].plot)
@slow
def test_tsplot(self):
from pandas.tseries.plotting import tsplot
import matplotlib.pyplot as plt
ax = plt.gca()
ts = tm.makeTimeSeries()
f = lambda *args, **kwds: tsplot(s, plt.Axes.plot, *args, **kwds)
for s in self.period_ser:
_check_plot_works(f, s.index.freq, ax=ax, series=s)
for s in self.datetime_ser:
_check_plot_works(f, s.index.freq.rule_code, ax=ax, series=s)
for s in self.period_ser:
_check_plot_works(s.plot, ax=ax)
for s in self.datetime_ser:
_check_plot_works(s.plot, ax=ax)
ax = ts.plot(style='k')
color = (0., 0., 0., 1) if self.mpl_ge_2_0_0 else (0., 0., 0.)
self.assertEqual(color, ax.get_lines()[0].get_color())
def test_both_style_and_color(self):
import matplotlib.pyplot as plt # noqa
ts = tm.makeTimeSeries()
self.assertRaises(ValueError, ts.plot, style='b-', color='#000099')
s = ts.reset_index(drop=True)
self.assertRaises(ValueError, s.plot, style='b-', color='#000099')
@slow
def test_high_freq(self):
freaks = ['ms', 'us']
for freq in freaks:
rng = date_range('1/1/2012', periods=100000, freq=freq)
ser = Series(np.random.randn(len(rng)), rng)
_check_plot_works(ser.plot)
def test_get_datevalue(self):
from pandas.tseries.converter import get_datevalue
self.assertIsNone(get_datevalue(None, 'D'))
self.assertEqual(get_datevalue(1987, 'A'), 1987)
self.assertEqual(get_datevalue(Period(1987, 'A'), 'M'),
Period('1987-12', 'M').ordinal)
self.assertEqual(get_datevalue('1/1/1987', 'D'),
Period('1987-1-1', 'D').ordinal)
@slow
def test_ts_plot_format_coord(self):
def check_format_of_first_point(ax, expected_string):
first_line = ax.get_lines()[0]
first_x = first_line.get_xdata()[0].ordinal
first_y = first_line.get_ydata()[0]
try:
self.assertEqual(expected_string,
ax.format_coord(first_x, first_y))
except (ValueError):
raise nose.SkipTest("skipping test because issue forming "
"test comparison GH7664")
annual = Series(1, index=date_range('2014-01-01', periods=3,
freq='A-DEC'))
check_format_of_first_point(annual.plot(), 't = 2014 y = 1.000000')
# note this is added to the annual plot already in existence, and
# changes its freq field
daily = Series(1, index=date_range('2014-01-01', periods=3, freq='D'))
check_format_of_first_point(daily.plot(),
't = 2014-01-01 y = 1.000000')
tm.close()
# tsplot
import matplotlib.pyplot as plt
from pandas.tseries.plotting import tsplot
tsplot(annual, plt.Axes.plot)
check_format_of_first_point(plt.gca(), 't = 2014 y = 1.000000')
tsplot(daily, plt.Axes.plot)
check_format_of_first_point(plt.gca(), 't = 2014-01-01 y = 1.000000')
@slow
def test_line_plot_period_series(self):
for s in self.period_ser:
_check_plot_works(s.plot, s.index.freq)
@slow
def test_line_plot_datetime_series(self):
for s in self.datetime_ser:
_check_plot_works(s.plot, s.index.freq.rule_code)
@slow
def test_line_plot_period_frame(self):
for df in self.period_df:
_check_plot_works(df.plot, df.index.freq)
@slow
def test_line_plot_datetime_frame(self):
for df in self.datetime_df:
freq = df.index.to_period(df.index.freq.rule_code).freq
_check_plot_works(df.plot, freq)
@slow
def test_line_plot_inferred_freq(self):
for ser in self.datetime_ser:
ser = Series(ser.values, Index(np.asarray(ser.index)))
_check_plot_works(ser.plot, ser.index.inferred_freq)
ser = ser[[0, 3, 5, 6]]
_check_plot_works(ser.plot)
def test_fake_inferred_business(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
fig.add_subplot(111)
rng = date_range('2001-1-1', '2001-1-10')
ts = Series(lrange(len(rng)), rng)
ts = ts[:3].append(ts[5:])
ax = ts.plot()
self.assertFalse(hasattr(ax, 'freq'))
@slow
def test_plot_offset_freq(self):
ser = tm.makeTimeSeries()
_check_plot_works(ser.plot)
dr = date_range(ser.index[0], freq='BQS', periods=10)
ser = Series(np.random.randn(len(dr)), dr)
_check_plot_works(ser.plot)
@slow
def test_plot_multiple_inferred_freq(self):
dr = Index([datetime(2000, 1, 1), datetime(2000, 1, 6), datetime(
2000, 1, 11)])
ser = Series(np.random.randn(len(dr)), dr)
_check_plot_works(ser.plot)
@slow
def test_uhf(self):
import pandas.tseries.converter as conv
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
fig.add_subplot(111)
idx = date_range('2012-6-22 21:59:51.960928', freq='L', periods=500)
df = DataFrame(np.random.randn(len(idx), 2), idx)
ax = df.plot()
axis = ax.get_xaxis()
tlocs = axis.get_ticklocs()
tlabels = axis.get_ticklabels()
for loc, label in zip(tlocs, tlabels):
xp = conv._from_ordinal(loc).strftime('%H:%M:%S.%f')
rs = str(label.get_text())
if len(rs):
self.assertEqual(xp, rs)
@slow
def test_irreg_hf(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
fig.add_subplot(111)
idx = date_range('2012-6-22 21:59:51', freq='S', periods=100)
df = DataFrame(np.random.randn(len(idx), 2), idx)
irreg = df.ix[[0, 1, 3, 4]]
ax = irreg.plot()
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
sec = 1. / 24 / 60 / 60
self.assertTrue((np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all(
))
plt.clf()
fig.add_subplot(111)
df2 = df.copy()
df2.index = df.index.asobject
ax = df2.plot()
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
self.assertTrue((np.fabs(diffs[1:] - sec) < 1e-8).all())
def test_irregular_datetime64_repr_bug(self):
import matplotlib.pyplot as plt
ser = tm.makeTimeSeries()
ser = ser[[0, 1, 2, 7]]
fig = plt.gcf()
plt.clf()
ax = fig.add_subplot(211)
ret = ser.plot()
self.assertIsNotNone(ret)
for rs, xp in zip(ax.get_lines()[0].get_xdata(), ser.index):
self.assertEqual(rs, xp)
def test_business_freq(self):
import matplotlib.pyplot as plt # noqa
bts = tm.makePeriodSeries()
ax = bts.plot()
self.assertEqual(ax.get_lines()[0].get_xydata()[0, 0],
bts.index[0].ordinal)
idx = ax.get_lines()[0].get_xdata()
self.assertEqual(PeriodIndex(data=idx).freqstr, 'B')
@slow
def test_business_freq_convert(self):
n = tm.N
tm.N = 300
bts = tm.makeTimeSeries().asfreq('BM')
tm.N = n
ts = bts.to_period('M')
ax = bts.plot()
self.assertEqual(ax.get_lines()[0].get_xydata()[0, 0],
ts.index[0].ordinal)
idx = ax.get_lines()[0].get_xdata()
self.assertEqual(PeriodIndex(data=idx).freqstr, 'M')
def test_nonzero_base(self):
# GH2571
idx = (date_range('2012-12-20', periods=24, freq='H') + timedelta(
minutes=30))
df = DataFrame(np.arange(24), index=idx)
ax = df.plot()
rs = ax.get_lines()[0].get_xdata()
self.assertFalse(Index(rs).is_normalized)
def test_dataframe(self):
bts = DataFrame({'a': tm.makeTimeSeries()})
ax = bts.plot()
idx = ax.get_lines()[0].get_xdata()
tm.assert_index_equal(bts.index.to_period(), PeriodIndex(idx))
@slow
def test_axis_limits(self):
import matplotlib.pyplot as plt
def _test(ax):
xlim = ax.get_xlim()
ax.set_xlim(xlim[0] - 5, xlim[1] + 10)
ax.get_figure().canvas.draw()
result = ax.get_xlim()
self.assertEqual(result[0], xlim[0] - 5)
self.assertEqual(result[1], xlim[1] + 10)
# string
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim('1/1/2000', '4/1/2000')
ax.get_figure().canvas.draw()
result = ax.get_xlim()
self.assertEqual(int(result[0]), expected[0].ordinal)
self.assertEqual(int(result[1]), expected[1].ordinal)
            # datetime
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim(datetime(2000, 1, 1), datetime(2000, 4, 1))
ax.get_figure().canvas.draw()
result = ax.get_xlim()
self.assertEqual(int(result[0]), expected[0].ordinal)
self.assertEqual(int(result[1]), expected[1].ordinal)
fig = ax.get_figure()
plt.close(fig)
ser = tm.makeTimeSeries()
ax = ser.plot()
_test(ax)
df = DataFrame({'a': ser, 'b': ser + 1})
ax = df.plot()
_test(ax)
df = DataFrame({'a': ser, 'b': ser + 1})
axes = df.plot(subplots=True)
for ax in axes:
_test(ax)
def test_get_finder(self):
import pandas.tseries.converter as conv
self.assertEqual(conv.get_finder('B'), conv._daily_finder)
self.assertEqual(conv.get_finder('D'), conv._daily_finder)
self.assertEqual(conv.get_finder('M'), conv._monthly_finder)
self.assertEqual(conv.get_finder('Q'), conv._quarterly_finder)
self.assertEqual(conv.get_finder('A'), conv._annual_finder)
self.assertEqual(conv.get_finder('W'), conv._daily_finder)
@slow
def test_finder_daily(self):
import matplotlib.pyplot as plt
xp = Period('1999-1-1', freq='B').ordinal
day_lst = [10, 40, 252, 400, 950, 2750, 10000]
for n in day_lst:
rng = bdate_range('1999-1-1', periods=n)
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
plt.close(ax.get_figure())
@slow
def test_finder_quarterly(self):
import matplotlib.pyplot as plt
xp = Period('1988Q1').ordinal
yrs = [3.5, 11]
for n in yrs:
rng = period_range('1987Q2', periods=int(n * 4), freq='Q')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(rs, xp)
(vmin, vmax) = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
plt.close(ax.get_figure())
@slow
def test_finder_monthly(self):
import matplotlib.pyplot as plt
xp = Period('Jan 1988').ordinal
yrs = [1.15, 2.5, 4, 11]
for n in yrs:
rng = period_range('1987Q2', periods=int(n * 12), freq='M')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(rs, xp)
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(xp, rs)
plt.close(ax.get_figure())
def test_finder_monthly_long(self):
rng = period_range('1988Q1', periods=24 * 12, freq='M')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1989Q1', 'M').ordinal
self.assertEqual(rs, xp)
@slow
def test_finder_annual(self):
import matplotlib.pyplot as plt
xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170]
for i, nyears in enumerate([5, 10, 19, 49, 99, 199, 599, 1001]):
rng = period_range('1987', periods=nyears, freq='A')
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
self.assertEqual(rs, Period(xp[i], freq='A').ordinal)
plt.close(ax.get_figure())
@slow
def test_finder_minutely(self):
nminutes = 50 * 24 * 60
rng = date_range('1/1/1999', freq='Min', periods=nminutes)
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1/1/1999', freq='Min').ordinal
self.assertEqual(rs, xp)
def test_finder_hourly(self):
nhours = 23
rng = date_range('1/1/1999', freq='H', periods=nhours)
ser = Series(np.random.randn(len(rng)), rng)
ax = ser.plot()
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1/1/1999', freq='H').ordinal
self.assertEqual(rs, xp)
@slow
def test_gaps(self):
import matplotlib.pyplot as plt
ts = tm.makeTimeSeries()
ts[5:25] = np.nan
ax = ts.plot()
lines = ax.get_lines()
tm._skip_if_mpl_1_5()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[5:25, 1].all())
plt.close(ax.get_figure())
# irregular
ts = tm.makeTimeSeries()
ts = ts[[0, 1, 2, 5, 7, 9, 12, 15, 20]]
ts[2:5] = np.nan
ax = ts.plot()
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[2:5, 1].all())
plt.close(ax.get_figure())
# non-ts
idx = [0, 1, 2, 5, 7, 9, 12, 15, 20]
ser = Series(np.random.randn(len(idx)), idx)
ser[2:5] = np.nan
ax = ser.plot()
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
l = lines[0]
data = l.get_xydata()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[2:5, 1].all())
@slow
def test_gap_upsample(self):
low = tm.makeTimeSeries()
low[5:25] = np.nan
ax = low.plot()
idxh = date_range(low.index[0], low.index[-1], freq='12h')
s = Series(np.random.randn(len(idxh)), idxh)
s.plot(secondary_y=True)
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
self.assertEqual(len(ax.right_ax.get_lines()), 1)
l = lines[0]
data = l.get_xydata()
tm._skip_if_mpl_1_5()
tm.assertIsInstance(data, np.ma.core.MaskedArray)
mask = data.mask
self.assertTrue(mask[5:25, 1].all())
@slow
def test_secondary_y(self):
import matplotlib.pyplot as plt
ser = Series(np.random.randn(10))
ser2 = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True)
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
fig = ax.get_figure()
axes = fig.get_axes()
l = ax.get_lines()[0]
xp = Series(l.get_ydata(), l.get_xdata())
assert_series_equal(ser, xp)
self.assertEqual(ax.get_yaxis().get_ticks_position(), 'right')
self.assertFalse(axes[0].get_yaxis().get_visible())
plt.close(fig)
ax2 = ser2.plot()
self.assertEqual(ax2.get_yaxis().get_ticks_position(),
self.default_tick_position)
plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
self.assertTrue(ax.get_yaxis().get_visible())
self.assertFalse(hasattr(ax, 'left_ax'))
self.assertTrue(hasattr(ax, 'right_ax'))
self.assertTrue(hasattr(ax2, 'left_ax'))
self.assertFalse(hasattr(ax2, 'right_ax'))
@slow
def test_secondary_y_ts(self):
import matplotlib.pyplot as plt
idx = date_range('1/1/2000', periods=10)
ser = Series(np.random.randn(10), idx)
ser2 = Series(np.random.randn(10), idx)
ax = ser.plot(secondary_y=True)
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
fig = ax.get_figure()
axes = fig.get_axes()
l = ax.get_lines()[0]
xp = Series(l.get_ydata(), l.get_xdata()).to_timestamp()
assert_series_equal(ser, xp)
self.assertEqual(ax.get_yaxis().get_ticks_position(), 'right')
self.assertFalse(axes[0].get_yaxis().get_visible())
plt.close(fig)
ax2 = ser2.plot()
self.assertEqual(ax2.get_yaxis().get_ticks_position(),
self.default_tick_position)
plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
self.assertTrue(ax.get_yaxis().get_visible())
@slow
def test_secondary_kde(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
import matplotlib.pyplot as plt # noqa
ser = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True, kind='density')
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
fig = ax.get_figure()
axes = fig.get_axes()
self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'right')
@slow
def test_secondary_bar(self):
ser = Series(np.random.randn(10))
ax = ser.plot(secondary_y=True, kind='bar')
fig = ax.get_figure()
axes = fig.get_axes()
self.assertEqual(axes[1].get_yaxis().get_ticks_position(), 'right')
@slow
def test_secondary_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
axes = df.plot(secondary_y=['a', 'c'], subplots=True)
self.assertEqual(axes[0].get_yaxis().get_ticks_position(), 'right')
self.assertEqual(axes[1].get_yaxis().get_ticks_position(),
self.default_tick_position)
self.assertEqual(axes[2].get_yaxis().get_ticks_position(), 'right')
@slow
def test_secondary_bar_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
axes = df.plot(kind='bar', secondary_y=['a', 'c'], subplots=True)
self.assertEqual(axes[0].get_yaxis().get_ticks_position(), 'right')
self.assertEqual(axes[1].get_yaxis().get_ticks_position(),
self.default_tick_position)
self.assertEqual(axes[2].get_yaxis().get_ticks_position(), 'right')
def test_mixed_freq_regular_first(self):
import matplotlib.pyplot as plt # noqa
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
# it works!
s1.plot()
ax2 = s2.plot(style='g')
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
self.assertTrue(idx1.equals(s1.index.to_period('B')))
self.assertTrue(idx2.equals(s2.index.to_period('B')))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
self.assertEqual(left, pidx[0].ordinal)
self.assertEqual(right, pidx[-1].ordinal)
@slow
def test_mixed_freq_irregular_first(self):
import matplotlib.pyplot as plt # noqa
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
s2.plot(style='g')
ax = s1.plot()
self.assertFalse(hasattr(ax, 'freq'))
lines = ax.get_lines()
x1 = lines[0].get_xdata()
tm.assert_numpy_array_equal(x1, s2.index.asobject.values)
x2 = lines[1].get_xdata()
tm.assert_numpy_array_equal(x2, s1.index.asobject.values)
def test_mixed_freq_regular_first_df(self):
# GH 9852
import matplotlib.pyplot as plt # noqa
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
ax = s1.plot()
ax2 = s2.plot(style='g', ax=ax)
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
self.assertTrue(idx1.equals(s1.index.to_period('B')))
self.assertTrue(idx2.equals(s2.index.to_period('B')))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
self.assertEqual(left, pidx[0].ordinal)
self.assertEqual(right, pidx[-1].ordinal)
@slow
def test_mixed_freq_irregular_first_df(self):
# GH 9852
import matplotlib.pyplot as plt # noqa
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
ax = s2.plot(style='g')
ax = s1.plot(ax=ax)
self.assertFalse(hasattr(ax, 'freq'))
lines = ax.get_lines()
x1 = lines[0].get_xdata()
tm.assert_numpy_array_equal(x1, s2.index.asobject.values)
x2 = lines[1].get_xdata()
tm.assert_numpy_array_equal(x2, s1.index.asobject.values)
def test_mixed_freq_hf_first(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
high.plot()
ax = low.plot()
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'D')
@slow
def test_mixed_freq_alignment(self):
ts_ind = date_range('2012-01-01 13:00', '2012-01-02', freq='H')
ts_data = np.random.randn(12)
ts = Series(ts_data, index=ts_ind)
ts2 = ts.asfreq('T').interpolate()
ax = ts.plot()
ts2.plot(style='r')
self.assertEqual(ax.lines[0].get_xdata()[0],
ax.lines[1].get_xdata()[0])
@slow
def test_mixed_freq_lf_first(self):
import matplotlib.pyplot as plt
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot(legend=True)
ax = high.plot(legend=True)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'D')
leg = ax.get_legend()
self.assertEqual(len(leg.texts), 2)
plt.close(ax.get_figure())
idxh = date_range('1/1/1999', periods=240, freq='T')
idxl = date_range('1/1/1999', periods=4, freq='H')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot()
ax = high.plot()
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'T')
def test_mixed_freq_irreg_period(self):
ts = tm.makeTimeSeries()
irreg = ts[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 29]]
rng = period_range('1/3/2000', periods=30, freq='B')
ps = Series(np.random.randn(len(rng)), rng)
irreg.plot()
ps.plot()
def test_mixed_freq_shared_ax(self):
# GH13341, using sharex=True
idx1 = date_range('2015-01-01', periods=3, freq='M')
idx2 = idx1[:1].union(idx1[2:])
s1 = Series(range(len(idx1)), idx1)
s2 = Series(range(len(idx2)), idx2)
fig, (ax1, ax2) = self.plt.subplots(nrows=2, sharex=True)
s1.plot(ax=ax1)
s2.plot(ax=ax2)
self.assertEqual(ax1.freq, 'M')
self.assertEqual(ax2.freq, 'M')
self.assertEqual(ax1.lines[0].get_xydata()[0, 0],
ax2.lines[0].get_xydata()[0, 0])
# using twinx
fig, ax1 = self.plt.subplots()
ax2 = ax1.twinx()
s1.plot(ax=ax1)
s2.plot(ax=ax2)
self.assertEqual(ax1.lines[0].get_xydata()[0, 0],
ax2.lines[0].get_xydata()[0, 0])
# TODO (GH14330, GH14322)
# plotting the irregular first does not yet work
# fig, ax1 = plt.subplots()
# ax2 = ax1.twinx()
# s2.plot(ax=ax1)
# s1.plot(ax=ax2)
# self.assertEqual(ax1.lines[0].get_xydata()[0, 0],
# ax2.lines[0].get_xydata()[0, 0])
@slow
def test_to_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
high.plot()
ax = low.plot()
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
# tsplot
from pandas.tseries.plotting import tsplot
import matplotlib.pyplot as plt
tsplot(high, plt.Axes.plot)
lines = tsplot(low, plt.Axes.plot)
for l in lines:
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
@slow
def test_from_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot()
ax = high.plot()
expected_h = idxh.to_period().asi8.astype(np.float64)
expected_l = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544,
1549, 1553, 1558, 1562], dtype=np.float64)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
self.assert_numpy_array_equal(xdata, expected_l)
else:
self.assert_numpy_array_equal(xdata, expected_h)
tm.close()
# tsplot
from pandas.tseries.plotting import tsplot
import matplotlib.pyplot as plt
tsplot(low, plt.Axes.plot)
lines = tsplot(high, plt.Axes.plot)
for l in lines:
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, idxh.freq)
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
self.assert_numpy_array_equal(xdata, expected_l)
else:
self.assert_numpy_array_equal(xdata, expected_h)
@slow
def test_from_resampling_area_line_mixed(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = DataFrame(np.random.rand(len(idxh), 3),
index=idxh, columns=[0, 1, 2])
low = DataFrame(np.random.rand(len(idxl), 3),
index=idxl, columns=[0, 1, 2])
# low to high
for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
ax = low.plot(kind=kind1, stacked=True)
ax = high.plot(kind=kind2, stacked=True, ax=ax)
# check low dataframe result
expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540,
1544, 1549, 1553, 1558, 1562],
dtype=np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
l = ax.lines[i]
self.assertEqual(PeriodIndex(l.get_xdata()).freq, idxh.freq)
self.assert_numpy_array_equal(l.get_xdata(orig=False),
expected_x)
# check stacked values are correct
expected_y += low[i].values
self.assert_numpy_array_equal(
l.get_ydata(orig=False), expected_y)
# check high dataframe result
expected_x = idxh.to_period().asi8.astype(np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
l = ax.lines[3 + i]
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq,
idxh.freq)
self.assert_numpy_array_equal(l.get_xdata(orig=False),
expected_x)
expected_y += high[i].values
self.assert_numpy_array_equal(l.get_ydata(orig=False),
expected_y)
# high to low
for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
ax = high.plot(kind=kind1, stacked=True)
ax = low.plot(kind=kind2, stacked=True, ax=ax)
# check high dataframe result
expected_x = idxh.to_period().asi8.astype(np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
l = ax.lines[i]
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq,
idxh.freq)
self.assert_numpy_array_equal(
l.get_xdata(orig=False), expected_x)
expected_y += high[i].values
self.assert_numpy_array_equal(
l.get_ydata(orig=False), expected_y)
# check low dataframe result
expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540,
1544, 1549, 1553, 1558, 1562],
dtype=np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
l = ax.lines[3 + i]
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq,
idxh.freq)
self.assert_numpy_array_equal(l.get_xdata(orig=False),
expected_x)
expected_y += low[i].values
self.assert_numpy_array_equal(l.get_ydata(orig=False),
expected_y)
@slow
def test_mixed_freq_second_millisecond(self):
# GH 7772, GH 7760
idxh = date_range('2014-07-01 09:00', freq='S', periods=50)
idxl = date_range('2014-07-01 09:00', freq='100L', periods=500)
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
# high to low
high.plot()
ax = low.plot()
self.assertEqual(len(ax.get_lines()), 2)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'L')
tm.close()
# low to high
low.plot()
ax = high.plot()
self.assertEqual(len(ax.get_lines()), 2)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(data=l.get_xdata()).freq, 'L')
@slow
def test_irreg_dtypes(self):
# date
idx = [date(2000, 1, 1), date(2000, 1, 5), date(2000, 1, 20)]
df = DataFrame(np.random.randn(len(idx), 3), Index(idx, dtype=object))
_check_plot_works(df.plot)
# np.datetime64
idx = date_range('1/1/2000', periods=10)
idx = idx[[0, 2, 5, 9]].asobject
df = DataFrame(np.random.randn(len(idx), 3), idx)
_check_plot_works(df.plot)
@slow
def test_time(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas])
df = DataFrame({'a': np.random.randn(len(ts)),
'b': np.random.randn(len(ts))},
index=ts)
ax = df.plot()
# verify tick labels
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S')
self.assertEqual(xp, rs)
# change xlim
ax.set_xlim('1:30', '5:00')
# check tick labels again
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S')
self.assertEqual(xp, rs)
@slow
def test_time_musec(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(microseconds=int(x))).time()
for x in deltas])
df = DataFrame({'a': np.random.randn(len(ts)),
'b': np.random.randn(len(ts))},
index=ts)
ax = df.plot()
# verify tick labels
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
# TODO: unused?
# us = int((t - int(t)) * 1e6)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S.%f')
self.assertEqual(xp, rs)
@slow
def test_secondary_upsample(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
low.plot()
ax = high.plot(secondary_y=True)
for l in ax.get_lines():
self.assertEqual(PeriodIndex(l.get_xdata()).freq, 'D')
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
for l in ax.left_ax.get_lines():
self.assertEqual(PeriodIndex(l.get_xdata()).freq, 'D')
@slow
def test_secondary_legend(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
plt.clf()
ax = fig.add_subplot(211)
# ts
df = tm.makeTimeDataFrame()
ax = df.plot(secondary_y=['A', 'B'])
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertEqual(leg.get_texts()[0].get_text(), 'A (right)')
self.assertEqual(leg.get_texts()[1].get_text(), 'B (right)')
self.assertEqual(leg.get_texts()[2].get_text(), 'C')
self.assertEqual(leg.get_texts()[3].get_text(), 'D')
self.assertIsNone(ax.right_ax.get_legend())
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
self.assertEqual(len(colors), 4)
plt.clf()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['A', 'C'], mark_right=False)
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertEqual(leg.get_texts()[0].get_text(), 'A')
self.assertEqual(leg.get_texts()[1].get_text(), 'B')
self.assertEqual(leg.get_texts()[2].get_text(), 'C')
self.assertEqual(leg.get_texts()[3].get_text(), 'D')
plt.clf()
ax = df.plot(kind='bar', secondary_y=['A'])
leg = ax.get_legend()
self.assertEqual(leg.get_texts()[0].get_text(), 'A (right)')
self.assertEqual(leg.get_texts()[1].get_text(), 'B')
plt.clf()
ax = df.plot(kind='bar', secondary_y=['A'], mark_right=False)
leg = ax.get_legend()
self.assertEqual(leg.get_texts()[0].get_text(), 'A')
self.assertEqual(leg.get_texts()[1].get_text(), 'B')
plt.clf()
ax = fig.add_subplot(211)
df = tm.makeTimeDataFrame()
ax = df.plot(secondary_y=['C', 'D'])
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertIsNone(ax.right_ax.get_legend())
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
self.assertEqual(len(colors), 4)
# non-ts
df = tm.makeDataFrame()
plt.clf()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['A', 'B'])
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertIsNone(ax.right_ax.get_legend())
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
self.assertEqual(len(colors), 4)
plt.clf()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['C', 'D'])
leg = ax.get_legend()
self.assertEqual(len(leg.get_lines()), 4)
self.assertIsNone(ax.right_ax.get_legend())
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
self.assertEqual(len(colors), 4)
def test_format_date_axis(self):
rng = date_range('1/1/2012', periods=12, freq='M')
df = DataFrame(np.random.randn(len(rng), 3), rng)
ax = df.plot()
xaxis = ax.get_xaxis()
for l in xaxis.get_ticklabels():
if len(l.get_text()) > 0:
self.assertEqual(l.get_rotation(), 30)
@slow
def test_ax_plot(self):
import matplotlib.pyplot as plt
x = DatetimeIndex(start='2012-01-02', periods=10, freq='D')
y = lrange(len(x))
fig = plt.figure()
ax = fig.add_subplot(111)
lines = ax.plot(x, y, label='Y')
tm.assert_index_equal(DatetimeIndex(lines[0].get_xdata()), x)
@slow
def test_mpl_nopandas(self):
import matplotlib.pyplot as plt
dates = [date(2008, 12, 31), date(2009, 1, 31)]
values1 = np.arange(10.0, 11.0, 0.5)
values2 = np.arange(11.0, 12.0, 0.5)
kw = dict(fmt='-', lw=4)
plt.close('all')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot_date([x.toordinal() for x in dates], values1, **kw)
ax.plot_date([x.toordinal() for x in dates], values2, **kw)
line1, line2 = ax.get_lines()
exp = np.array([x.toordinal() for x in dates], dtype=np.float64)
tm.assert_numpy_array_equal(line1.get_xydata()[:, 0], exp)
exp = np.array([x.toordinal() for x in dates], dtype=np.float64)
tm.assert_numpy_array_equal(line2.get_xydata()[:, 0], exp)
@slow
def test_irregular_ts_shared_ax_xlim(self):
# GH 2960
ts = tm.makeTimeSeries()[:20]
ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
# plot the left section of the irregular series, then the right section
ax = ts_irregular[:5].plot()
ts_irregular[5:].plot(ax=ax)
# check that axis limits are correct
left, right = ax.get_xlim()
self.assertEqual(left, ts_irregular.index.min().toordinal())
self.assertEqual(right, ts_irregular.index.max().toordinal())
@slow
def test_secondary_y_non_ts_xlim(self):
# GH 3490 - non-timeseries with secondary y
index_1 = [1, 2, 3, 4]
index_2 = [5, 6, 7, 8]
s1 = Series(1, index=index_1)
s2 = Series(2, index=index_2)
ax = s1.plot()
left_before, right_before = ax.get_xlim()
s2.plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
self.assertEqual(left_before, left_after)
self.assertTrue(right_before < right_after)
@slow
def test_secondary_y_regular_ts_xlim(self):
# GH 3490 - regular-timeseries with secondary y
index_1 = date_range(start='2000-01-01', periods=4, freq='D')
index_2 = date_range(start='2000-01-05', periods=4, freq='D')
s1 = Series(1, index=index_1)
s2 = Series(2, index=index_2)
ax = s1.plot()
left_before, right_before = ax.get_xlim()
s2.plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
self.assertEqual(left_before, left_after)
self.assertTrue(right_before < right_after)
@slow
def test_secondary_y_mixed_freq_ts_xlim(self):
# GH 3490 - mixed frequency timeseries with secondary y
rng = date_range('2000-01-01', periods=10000, freq='min')
ts = Series(1, index=rng)
ax = ts.plot()
left_before, right_before = ax.get_xlim()
ts.resample('D').mean().plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
# a downsample should not have changed either limit
self.assertEqual(left_before, left_after)
self.assertEqual(right_before, right_after)
@slow
def test_secondary_y_irregular_ts_xlim(self):
# GH 3490 - irregular-timeseries with secondary y
ts = tm.makeTimeSeries()[:20]
ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
ax = ts_irregular[:5].plot()
# plot higher-x values on secondary axis
ts_irregular[5:].plot(secondary_y=True, ax=ax)
# ensure secondary limits aren't overwritten by plot on primary
ts_irregular[:5].plot(ax=ax)
left, right = ax.get_xlim()
self.assertEqual(left, ts_irregular.index.min().toordinal())
self.assertEqual(right, ts_irregular.index.max().toordinal())
def test_plot_outofbounds_datetime(self):
# 2579 - checking this does not raise
values = [date(1677, 1, 1), date(1677, 1, 2)]
self.plt.plot(values)
values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)]
self.plt.plot(values)
def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
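# Helper used by the tests above: runs the plotting callable on a fresh subplot,
# checks that the axes picked up the expected frequency (from the series index or
# the explicit freq argument), repeats the call with an explicit ax keyword, and
# finally round-trips the figure through plt.savefig before closing it.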
import matplotlib.pyplot as plt
fig = plt.gcf()
try:
plt.clf()
ax = fig.add_subplot(211)
orig_ax = kwargs.pop('ax', plt.gca())
orig_axfreq = getattr(orig_ax, 'freq', None)
ret = f(*args, **kwargs)
assert ret is not None # do something more intelligent
ax = kwargs.pop('ax', plt.gca())
if series is not None:
dfreq = series.index.freq
if isinstance(dfreq, DateOffset):
dfreq = dfreq.rule_code
if orig_axfreq is None:
assert ax.freq == dfreq
if freq is not None and orig_axfreq is None:
assert ax.freq == freq
ax = fig.add_subplot(212)
try:
kwargs['ax'] = ax
ret = f(*args, **kwargs)
assert ret is not None # do something more intelligent
except Exception:
pass
with ensure_clean(return_filelike=True) as path:
plt.savefig(path)
finally:
plt.close(fig)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| apache-2.0 |
ArijanaBrlek/superset | setup.py | 3 | 2532 | import os
import subprocess
import json
from setuptools import setup, find_packages
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
PACKAGE_DIR = os.path.join(BASE_DIR, 'superset', 'static', 'assets')
PACKAGE_FILE = os.path.join(PACKAGE_DIR, 'package.json')
with open(PACKAGE_FILE) as package_file:
version_string = json.load(package_file)['version']
def get_git_sha():
try:
s = str(subprocess.check_output(['git', 'rev-parse', 'HEAD']))
return s.strip()
except:
return ""
GIT_SHA = get_git_sha()
version_info = {
'GIT_SHA': GIT_SHA,
'version': version_string,
}
print("-==-" * 15)
print("VERSION: " + version_string)
print("GIT SHA: " + GIT_SHA)
print("-==-" * 15)
with open(os.path.join(PACKAGE_DIR, 'version_info.json'), 'w') as version_file:
json.dump(version_info, version_file)
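# For illustration only: the resulting version_info.json looks roughly like
# {"GIT_SHA": "<current commit sha>", "version": "<version from package.json>"}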
setup(
name='superset',
description=(
"A interactive data visualization platform build on SqlAlchemy "
"and druid.io"),
version=version_string,
packages=find_packages(),
include_package_data=True,
zip_safe=False,
scripts=['superset/bin/superset'],
install_requires=[
'boto3==1.4.4',
'celery==3.1.23',
'cryptography==1.7.2',
'flask-appbuilder==1.8.1',
'flask-cache==0.13.1',
'flask-migrate==2.0.3',
'flask-script==2.0.5',
'flask-sqlalchemy==2.0',
'flask-testing==0.6.2',
'flask-wtf==0.14.2',
'future>=0.16.0, <0.17',
'humanize==0.5.1',
'gunicorn==19.7.1',
'markdown==2.6.8',
'pandas==0.19.2',
'parsedatetime==2.0.0',
'pydruid==0.3.1',
'PyHive>=0.3.0',
'python-dateutil==2.6.0',
'requests==2.13.0',
'simplejson==3.10.0',
'six==1.10.0',
'sqlalchemy==1.1.9',
'sqlalchemy-utils==0.32.14',
'sqlparse==0.2.3',
'thrift>=0.9.3',
'thrift-sasl>=0.2.1',
],
extras_require={
'cors': ['Flask-Cors>=2.0.0'],
},
tests_require=[
'codeclimate-test-reporter',
'coverage',
'mock',
'nose',
],
author='Maxime Beauchemin',
author_email='[email protected]',
url='https://github.com/airbnb/superset',
download_url=(
'https://github.com/airbnb/superset/tarball/' + version_string),
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
| apache-2.0 |
gotomypc/scikit-learn | sklearn/datasets/tests/test_base.py | 205 | 5878 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
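# Note: the fixture above creates two category directories but writes a single
# sample file into only the first one, which is why the load_files tests below
# expect len(res.filenames) == 1 and len(res.target_names) == 2.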
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_equal(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
martinwicke/tensorflow | tensorflow/python/client/notebook.py | 33 | 4608 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import socket
import sys
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
"password", None,
"Password to require. If set, the server will allow public access."
" Only used if notebook config file does not exist.")
flags.DEFINE_string("notebook_dir", "experimental/brain/notebooks",
"root location where to store notebooks")
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
sys.argv = ORIG_ARGV
if not IS_KERNEL:
# Drop all flags.
sys.argv = [sys.argv[0]]
# NOTE(sadovsky): For some reason, putting this import at the top level
# breaks inline plotting. It's probably a bug in the stone-age version of
# matplotlib.
from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top
notebookapp = NotebookApp.instance()
notebookapp.open_browser = True
# password functionality adopted from quality/ranklab/main/tools/notebook.py
# add options to run with "password"
if FLAGS.password:
from IPython.lib import passwd # pylint: disable=g-import-not-at-top
notebookapp.ip = "0.0.0.0"
notebookapp.password = passwd(FLAGS.password)
else:
print ("\nNo password specified; Notebook server will only be available"
" on the local machine.\n")
notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])
if notebookapp.ip == "0.0.0.0":
proto = "https" if notebookapp.certfile else "http"
url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
notebookapp.base_project_url)
print("\nNotebook server will be publicly available at: %s\n" % url)
notebookapp.start()
return
# Drop the --flagfile flag so that notebook doesn't complain about an
# "unrecognized alias" when parsing sys.argv.
sys.argv = ([sys.argv[0]] +
[z for z in sys.argv[1:] if not z.startswith("--flagfile")])
from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top
kernelapp = IPKernelApp.instance()
kernelapp.initialize()
# Enable inline plotting. Equivalent to running "%matplotlib inline".
ipshell = kernelapp.shell
ipshell.enable_matplotlib("inline")
kernelapp.start()
if __name__ == "__main__":
# When the user starts the main notebook process, we don't touch sys.argv.
# When the main process launches kernel subprocesses, it writes all flags
# to a tmpfile and sets --flagfile to that tmpfile, so for kernel
# subprocesses here we drop all flags *except* --flagfile, then call
# app.run(), and then (in main) restore all flags before starting the
# kernel app.
if IS_KERNEL:
# Drop everything except --flagfile.
sys.argv = ([sys.argv[0]] +
[x for x in sys.argv[1:] if x.startswith("--flagfile")])
app.run()
| apache-2.0 |
cmaass/swimmertracking | parametergui.py | 1 | 62991 | #!/usr/bin/env python
#Author: [email protected]
#License: GPL
import wx
import wx.lib.scrolledpanel as scp
import numpy as np
from PIL import Image
import matplotlib
matplotlib.use('WXAgg')
from matplotlib import pylab as pl
from matplotlib import cm
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
from mpl_toolkits.mplot3d import Axes3D
import os
import io
import pickle
import cv2
import subprocess
import re
import readtraces as rt
from random import randint
from sys import exc_info
#this directory definition is changed in the source code at runtime which is probably a really bad idea but good for portability
moviedir='/media/Corinna2/4MAGDEBOURG/Ribbonmotion/'#end
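# Note on the '#end' marker above: OpenMovie() below rewrites this assignment in
# place via a lookbehind/lookahead substitution, roughly
# re.sub("(?<=\nmoviedir=').*?(?='#end)", newdir, sourcetext),
# so the marker must stay on the same line as the path.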
def GetBitmap(width=1, height=1, colour = (0,0,0) ):
"""Helper funcion to generate a wxBitmap of defined size and colour.
Prettier (and possibly less embarassing) than putting an empty bitmap on your GUI showing whatever garbage is still in your bitmap buffer.
Source: wxpython wiki"""
ar = np.zeros( (height, width, 3),'uint8')
ar[:,:,] = colour
image = wx.EmptyImage(width,height)
image.SetData(ar.tostring())
wxBitmap = image.ConvertToBitmap() # OR: wx.BitmapFromImage(image)
return wxBitmap
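# Possible usage (illustrative only): a grey placeholder for an empty preview area
# could be created with GetBitmap(640, 480, colour=(128, 128, 128)).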
class InfoWin(wx.Frame):
"""Child window displaying image detection parameters for the current movie.
Parameters are taken from the parent window's parameters attribute (a dictionary).
The Update method needs to be called to actually show anything."""
#TODO: is TopWindow actually the parent and we don't need to pass it?
def __init__(self, parent):
wx.Frame.__init__(self, wx.GetApp().TopWindow, -1, "Movie info", size=(400,400)) #init parent class
self.infotext=''
self.text=wx.TextCtrl(self,-1, self.infotext, style=wx.TE_MULTILINE)
self.text.SetEditable(False)
sizer=wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.text,5, wx.EXPAND|wx.ALL,5)
self.SetSizer(sizer)
self.parent=parent
def Update(self,text=''):
"""Reads in and displays current set of parameters from parent."""
paras=self.parent.parameters
if text=="":
self.infotext=""
for k in paras.keys():
self.infotext+="%s: %s\n"%(k,paras[k])
self.infotext=self.infotext[:-1]
else: self.infotext=text
self.text.SetValue(self.infotext)
#source:
#http://matplotlib.org/examples/user_interfaces/embedding_in_wx2.html
class StackWin(wx.Frame):
def __init__(self,parent):
wx.Frame.__init__(self,parent,-1, 'Stack plot',size=(550,350))
self.parent=parent
self.SetBackgroundColour(wx.NamedColor("WHITE"))
self.figure = Figure()
self.canvas = FigureCanvas(self, -1, self.figure)
self.axes = Axes3D(self.figure)
try:
data=np.loadtxt(self.parent.movie.datadir+'coords.txt')
xsc,ysc,zsc=self.parent.movie.parameters['xscale'],self.parent.movie.parameters['yscale'],self.parent.movie.parameters['zscale']
xs=data[:,3]*xsc
ys=data[:,4]*ysc
zs=data[:,0]*zsc
ss=data[:,2]*xsc/72.
self.axes.scatter(xs, ys, zs,s=ss)
scaling = np.array([getattr(self.axes, 'get_{}lim'.format(dim))() for dim in 'xyz'])
self.axes.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]]*3)
except:
print "sorry, plot failed! Is there a coordinate file?"
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.SetSizer(self.sizer)
self.Fit()
self.add_toolbar() # comment this out for no toolbar
def add_toolbar(self):
self.toolbar = NavigationToolbar2Wx(self.canvas)
self.toolbar.Realize()
if wx.Platform == '__WXMAC__':
# Mac platform (OSX 10.3, MacPython) does not seem to cope with
# having a toolbar in a sizer. This work-around gets the buttons
# back, but at the expense of having the toolbar at the top
self.SetToolBar(self.toolbar)
else:
# On Windows platform, default window size is incorrect, so set
# toolbar width to figure width.
tw, th = self.toolbar.GetSizeTuple()
fw, fh = self.canvas.GetSizeTuple()
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version.
# As noted above, doesn't work for Mac.
self.toolbar.SetSize(wx.Size(fw, th))
self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
# update the axes menu on the toolbar
self.toolbar.update()
def OnPaint(self, event):
self.canvas.draw()
class HistoWin(wx.Frame):
"""Window displaying RGB histogram of current image in semilog y. """
def __init__(self,parent,image):
wx.Frame.__init__(self, wx.GetApp().TopWindow, -1, "Image histogram", size=(600,400)) #init parent class
if len(image.shape)==2: image=np.dstack((image,image,image)) #in case this isn't 3 channels already (e.g. greyscale)
if len(image.shape)==3 and image.shape[2]!=3: image=np.dstack((image[:,:,0],image[:,:,0],image[:,:,0]))
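# The histogram is rendered by matplotlib into an in-memory PNG buffer and read
# back as a wx.Image below, so no temporary file is written to disk.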
buf=io.BytesIO() #
pl.figure(figsize=[6,4])
for i in range(3):
pl.hist(image[:,:,i].flatten(), bins=256, log=True, histtype='step',align='mid',color='rgb'[i])
pl.savefig(buf,dpi=100,format='png')
buf.seek(0)
im=wx.ImageFromStream(buf)
self.plot=wx.StaticBitmap(self,bitmap=wx.BitmapFromImage(im),size=(600,400),pos=(0,0))
def Update(self,parent,image):
"""Updates histogram with data from currently displayed image."""
pl.close("all")
buf=io.BytesIO()
pl.figure(figsize=[6,4])
if len(image.shape)==2: image=np.dstack((image,image,image))
if len(image.shape)==3 and image.shape[2]!=3: image=np.dstack((image[:,:,0],image[:,:,0],image[:,:,0]))
for i in range(3):
pl.hist(image[:,:,i].flatten(), bins=np.arange(256), log=True, histtype='step',align='mid', color='rgb'[i])
pl.xlim(0,255)
pl.savefig(buf,dpi=100,format='png')
buf.seek(0)
im=wx.ImageFromStream(buf)
self.plot.SetBitmap(wx.BitmapFromImage(im))
class MyPanel(scp.ScrolledPanel):
"""Scrolled panel containing movie frame image."""
def __init__(self, parent):
scp.ScrolledPanel.__init__(self,parent) #init parent class
self.SetScrollRate(20,20)
self.EnableScrolling(True,True)
self.parent=parent
self.im=MyImage(self)
imbox=wx.BoxSizer(wx.HORIZONTAL)
imbox.Add(self.im)
self.SetSizer(imbox)
self.im.SetCursor(wx.StockCursor(wx.CURSOR_CROSS))
class MyImage(wx.StaticBitmap):
"""Image displaying current movie frame (24 bit RGB, scalable, zoomable)."""
def __init__(self, parent):
self.parent=parent #stuff needed before parent initialisation
self.pparent=parent.parent
col=self.parent.GetBackgroundColour()
self.scale=1. #implements zoom control in parent
wx.StaticBitmap.__init__(self, parent,-1,GetBitmap(colour=col), (5,5)) #init parent class
self.axes=[]
self.points=[]
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)
self.Bind(wx.EVT_RIGHT_UP, self.OnRightUp)
self.savestatus=wx.GetTopLevelParent(self).sb.GetStatusText(0)
self.savept=(0,0)
def OnLeftDown(self,event):
ctrlstate=wx.GetKeyState(wx.WXK_CONTROL)
if not ctrlstate:
pt=event.GetPosition()
pt=self.parent.CalcUnscrolledPosition(pt)
self.savestatus=self.pparent.sb.GetStatusText(0)
if len(self.pparent.images[self.pparent.imType].shape)==2:
RGB=", grey %d"%self.pparent.images[self.pparent.imType][pt[1],pt[0]]
else:
RGB=", RGB (%d,%d,%d)"%tuple(self.pparent.images[self.pparent.imType][pt[1],pt[0],:])
self.pparent.sb.SetStatusText("x %d, y %d"%(pt.x/self.scale,pt.y/self.scale)+RGB, 0)
else:
pt=event.GetPosition()
pt=self.parent.CalcUnscrolledPosition(pt)
self.savestatus=self.pparent.sb.GetStatusText(0)
if len(self.pparent.images[self.pparent.imType].shape)==2:
RGB=", grey %d"%self.pparent.images[self.pparent.imType][pt[1],pt[0]]
else:
RGB=", RGB (%d,%d,%d)"%tuple(self.pparent.images[self.pparent.imType][pt[1],pt[0],:])
self.pparent.sb.SetStatusText("x %d, y %d with control"%(pt.x/self.scale,pt.y/self.scale)+RGB, 0)
def OnLeftUp(self,event):
self.pparent.sb.SetStatusText(self.savestatus, 0)
def OnRightDown(self,event):
if self.pparent.movie.typ=="3D stack":
pt=event.GetPosition()
self.savept=self.parent.CalcUnscrolledPosition(pt)
def OnRightUp(self,event):
if self.pparent.movie.typ=="3D stack":
oldcrop=self.pparent.movie.parameters['crop']
pt=event.GetPosition()
pt=self.parent.CalcUnscrolledPosition(pt)
self.pparent.movie.parameters['crop']=[oldcrop[1]+int(self.savept[1]/self.scale),oldcrop[0]+int(self.savept[0]/self.scale),
oldcrop[1]+int(pt[1]/self.scale),oldcrop[0]+int(pt[0]/self.scale)]
self.pparent.parameters['crop']=self.pparent.movie.parameters['crop']
self.pparent.stCropContr.SetValue(str(self.pparent.movie.parameters['crop'])[1:-1])
self.pparent.StImgDisplay()
def Redraw(self):
"""Actually display an image. Accepts both filename (no existence/file type check, though) as string or imge as numpy array."""
image=self.pparent.images[self.pparent.imType] #types: orig, bg, subtracted, threshold, orig with particles.
if image !='':
if type(image).__name__=='ndarray':
if len(image.shape)==2: image=np.dstack((image,image,image))
im = wx.EmptyImage(image.shape[1],image.shape[0])
im.SetData(image.astype(np.uint8).tostring())
if type(image).__name__=='str':
im=wx.Image(image)
im.Rescale(im.GetSize()[0]*self.scale,im.GetSize()[1]*self.scale)
bit=wx.BitmapFromImage(im)
#ds=wx.GetDisplaySize()
ds=wx.Display(0).GetGeometry().GetSize()
ws=(im.GetSize()[0]+120,im.GetSize()[1]+300)
if ws[0]<ds[0] and ws[1]<ds[1]:
winsize=ws
else:
winsize=ds
self.pparent.SetSize(winsize)
self.SetBitmap(bit)
class MyFrame(wx.Frame):
"""Main window of movie analysis GUI"""
def __init__(self):
wx.Frame.__init__(self, None, -1, "Particle detection parameters", size=(1024,768))
#buttons, radio buttons and stuff.
self.sb = self.CreateStatusBar(2)
self.scp=MyPanel(self)
paraPanel=wx.Panel(self)
buttonPanel=wx.Panel(self)
nbPanel = wx.Panel(self)
threshLabel=wx.StaticText(paraPanel,-1,'Threshold')
self.threshContr=wx.TextCtrl(paraPanel,200,'',size=(50,-1),style=wx.TE_PROCESS_ENTER)
BGrngLabel=wx.StaticText(paraPanel,-1,'BG range')
self.BGrngContr=wx.TextCtrl(paraPanel,-1,'120,155',size=(50,-1),style=wx.TE_PROCESS_ENTER)
strLabel=wx.StaticText(paraPanel,-1,'Kernel size')
self.strContr=wx.TextCtrl(paraPanel,201,'',size=(50,-1), style=wx.TE_PROCESS_ENTER)
self.frameSldr = wx.Slider(paraPanel,202,value=0, minValue=0, maxValue=100, style=wx.SL_HORIZONTAL)
self.fwdB=wx.Button(paraPanel,203,">",size=(30,-1))
self.backB=wx.Button(paraPanel,204,"<",size=(30,-1))
self.frameContr=wx.TextCtrl(paraPanel,205,'0',size=(60,-1),style=wx.TE_PROCESS_ENTER)
psizeLabel=wx.StaticText(paraPanel,-1,'Part. size')
self.psizeContr=wx.TextCtrl(paraPanel,206,'',size=(80,-1), style=wx.TE_PROCESS_ENTER)
self.sizeCheck=wx.CheckBox(paraPanel, 211, label='Draw sizes')
self.invCheck=wx.CheckBox(paraPanel, 212, label='Invert')
self.maskCheck=wx.CheckBox(paraPanel, 213, label='Mask')
self.diskfitCheck=wx.CheckBox(paraPanel, 214, label='Fit disk')
channelCBlabel=wx.StaticText(paraPanel,-1,'RGB channel')
self.channelCB=wx.ComboBox(paraPanel, 207, choices=['0','1','2'], style=wx.CB_READONLY,size=(50,-1))
self.channelCB.SetValue('0')
frameminmaxLabel=wx.StaticText(paraPanel,-1,'Range')
self.frameMinMaxContr=wx.TextCtrl(paraPanel,-1,'',size=(60,-1),style=wx.TE_PROCESS_ENTER)
framespacLabel=wx.StaticText(paraPanel,-1,'Frame spacing')
self.frameSpacContr=wx.TextCtrl(paraPanel,210,'',size=(40,-1),style=wx.TE_PROCESS_ENTER)
sphericityLabel=wx.StaticText(paraPanel,-1,'Sphericity')
self.sphericityContr=wx.TextCtrl(paraPanel,208,'-1.',size=(40,-1), style=wx.TE_PROCESS_ENTER)
blurLabel=wx.StaticText(paraPanel,-1,'Blur')
self.blurContr=wx.TextCtrl(paraPanel,209,'',size=(50,-1), style=wx.TE_PROCESS_ENTER)
savePsB=wx.Button(buttonPanel,100,"Save parameters...",size=(140,-1))
readPsB=wx.Button(buttonPanel,101,"Read parameters...",size=(140,-1))
paraB=wx.Button(buttonPanel,103,"Show parameters...",size=(140,-1))
histoB=wx.Button(buttonPanel,104,"Histogram...",size=(140,-1))
expImB=wx.Button(buttonPanel,108,"Export image...",size=(140,-1))
self.zoomBox = wx.ComboBox(buttonPanel, 400, choices=['20%','50%','100%','150%','200%','300%','400%'], size=(140,-1))
self.zoomBox.SetValue('100%')
#note: any control that will be accessed from inside a method needs the "self." prefix to make it available within the scope of the entire class.
#e.g. the two following buttons are disabled/enabled during movie processing.
self.nb = wx.Notebook(nbPanel)
parttab=wx.Panel(self.nb)
openMovB=wx.Button(parttab,102,"Open movie...",size=(140,-1))
self.getTrajB=wx.Button(parttab,105,"Get trajectories",size=(140,-1))
self.getCoordB=wx.Button(parttab,106,"Get coordinates",size=(140,-1))
self.getBgB=wx.Button(parttab,107,"Get background",size=(140,-1))
self.rImstate = [wx.RadioButton(parttab, 300, label='Original',style=wx.RB_GROUP),
wx.RadioButton(parttab, 301, label='Single channel'),
wx.RadioButton(parttab, 302, label='Background'),
wx.RadioButton(parttab, 303, label='Mask'),
wx.RadioButton(parttab, 304, label='BG treated'),
wx.RadioButton(parttab, 305, label='Threshold'),
wx.RadioButton(parttab, 306, label='Particles')]
self.rImstate[0].SetValue(True)
#next tab here!
clustertab=wx.Panel(self.nb)
openClMovB=wx.Button(clustertab,550,"Open movie...",size=(140,-1))
self.getCluB=wx.Button(clustertab,501,"Get clusters",size=(140,-1))
self.convTrajCluB=wx.Button(clustertab,502,"Convert trajectories...",size=(140,-1))
self.voroCheck=wx.CheckBox(clustertab, -1, label='Voronoi')
self.clustNumCheck=wx.CheckBox(clustertab, -1, label='Label clusters')
self.clustNumCheck.SetValue(True)
self.cImstate = [wx.RadioButton(clustertab, 520, label='Original',style=wx.RB_GROUP),
wx.RadioButton(clustertab, 521, label='Single channel'),
wx.RadioButton(clustertab, 522, label='Blur'),
wx.RadioButton(clustertab, 523, label='Mask'),
wx.RadioButton(clustertab, 524, label='Threshold'),
wx.RadioButton(clustertab, 525, label='Clusters'),
wx.RadioButton(clustertab, 526, label='Voronoi')]
self.cImstate[0].SetValue(True)
if rt.vorflag:
self.voroCheck.SetValue(True)
else:
self.voroCheck.SetValue(False)
self.voroCheck.Disable()
self.cImstate[-1].Disable()
stacktab=wx.Panel(self.nb)
openStackB=wx.Button(stacktab,650,"Open 3D stack...",size=(140,-1))
stCropLabel=wx.StaticText(stacktab,-1,'crop l,b,r,t')
self.sImstate = [wx.RadioButton(stacktab, 620, label='Original',style=wx.RB_GROUP),
wx.RadioButton(stacktab, 621, label='Single channel'),
wx.RadioButton(stacktab, 622, label='Threshold'),
wx.RadioButton(stacktab, 623, label='Particles')]
self.stCropContr=wx.TextCtrl(stacktab,651,'',size=(140,-1), style=wx.TE_PROCESS_ENTER)
stackResetCropB=wx.Button(stacktab,652,"Reset crop",size=(140,-1))
self.getStCoordB=wx.Button(stacktab,653,"Get coordinates",size=(140,-1))
plotStackB=wx.Button(stacktab,654,"Plot stack...",size=(140,-1))
#setting up the window layout with tons of nested sizers.
hboxBig=wx.BoxSizer(wx.HORIZONTAL)
vboxLeft=wx.BoxSizer(wx.VERTICAL)
vboxRight=wx.BoxSizer(wx.VERTICAL)
vboxLeft.Add(self.scp,1, wx.EXPAND|wx.ALL,5)
vboxPara=wx.BoxSizer(wx.VERTICAL)
hboxPara1=wx.BoxSizer(wx.HORIZONTAL)
hboxPara1.Add(threshLabel, 0, wx.ALIGN_LEFT|wx.ALL,5)
hboxPara1.Add(self.threshContr, 0, wx.ALIGN_LEFT|wx.ALL,5)
hboxPara1.Add(BGrngLabel, 0, wx.ALIGN_LEFT|wx.ALL,5)
hboxPara1.Add(self.BGrngContr, 0, wx.ALIGN_LEFT|wx.ALL,5)
hboxPara1.Add(strLabel, 0, wx.ALIGN_LEFT|wx.ALL,5)
hboxPara1.Add(self.strContr, 0, wx.ALIGN_LEFT|wx.ALL,5)
hboxPara1.Add(psizeLabel, 0, wx.ALIGN_LEFT|wx.ALL,5)
hboxPara1.Add(self.psizeContr, 0, wx.ALIGN_LEFT|wx.ALL,5)
hboxPara1.Add(sphericityLabel, 0, wx.ALIGN_LEFT|wx.ALL,5)
hboxPara1.Add(self.sphericityContr, 0, wx.ALIGN_LEFT|wx.ALL,5)
hboxPara1.Add(blurLabel, 0, wx.ALIGN_LEFT|wx.ALL,5)
hboxPara1.Add(self.blurContr, 0, wx.ALIGN_LEFT|wx.ALL,5)
vboxPara.Add(hboxPara1,0,wx.EXPAND)
hboxPara2=wx.BoxSizer(wx.HORIZONTAL)
hboxPara2.Add(self.sizeCheck, 0, wx.ALIGN_LEFT|wx.ALL,5)
hboxPara2.Add(self.invCheck, 0, wx.ALIGN_LEFT|wx.ALL,5)
hboxPara2.Add(self.maskCheck, 0, wx.ALIGN_LEFT|wx.ALL,5)
hboxPara2.Add(self.diskfitCheck, 0, wx.ALIGN_LEFT|wx.ALL,5)
hboxPara2.Add(channelCBlabel, 0, wx.ALIGN_LEFT|wx.ALL,5)
hboxPara2.Add(self.channelCB, 0, wx.ALIGN_LEFT|wx.ALL,5)
vboxPara.Add(hboxPara2,0,wx.EXPAND)
vboxPara.Add(self.frameSldr, 0, wx.ALIGN_BOTTOM|wx.ALL|wx.EXPAND,5)
hboxFrames=wx.BoxSizer(wx.HORIZONTAL)
hboxFrames.Add(self.backB, 0, wx.ALIGN_CENTER|wx.ALL,5)
hboxFrames.Add(self.frameContr, 0,wx.ALIGN_CENTER|wx.ALL,5)
hboxFrames.Add(self.fwdB, 0, wx.ALIGN_CENTER|wx.ALL,5)
hboxFrames.Add(frameminmaxLabel, 0, wx.ALIGN_RIGHT|wx.ALL,5)
hboxFrames.Add(self.frameMinMaxContr, 0, wx.ALIGN_RIGHT|wx.ALL,5)
hboxFrames.Add(framespacLabel, 0, wx.ALIGN_RIGHT|wx.ALL,5)
hboxFrames.Add(self.frameSpacContr, 0, wx.ALIGN_RIGHT|wx.ALL,5)
vboxPara.Add(hboxFrames,0,wx.EXPAND)
paraPanel.SetSizer(vboxPara)
vboxLeft.Add(paraPanel,0, wx.ALIGN_BOTTOM|wx.ALL,5)
vboxMov=wx.BoxSizer(wx.VERTICAL)
vboxMov.Add(self.zoomBox, 0, wx.ALIGN_LEFT|wx.ALL,5)
vboxMov.Add(savePsB, 0, wx.ALIGN_LEFT|wx.ALL,5)
vboxMov.Add(readPsB, 0, wx.ALIGN_LEFT|wx.ALL,5)
vboxMov.Add(paraB, 0, wx.ALIGN_LEFT|wx.ALL,5)
vboxMov.Add(histoB, 0, wx.ALIGN_LEFT|wx.ALL,5)
vboxMov.Add(expImB, 0, wx.ALIGN_LEFT|wx.ALL,5)
buttonPanel.SetSizer(vboxMov)
vboxRight.Add(buttonPanel,0, wx.ALIGN_RIGHT|wx.ALL,5)
vboxNB=wx.BoxSizer(wx.VERTICAL)
vboxPart=wx.BoxSizer(wx.VERTICAL)
vboxPart.Add(openMovB, 0, wx.ALIGN_LEFT|wx.ALL,5)
sbIm = wx.StaticBox(parttab, label="Image display")
sbsizerIm = wx.StaticBoxSizer(sbIm, wx.VERTICAL)
for i in range(7): sbsizerIm.Add(self.rImstate[i], 0, wx.ALIGN_LEFT|wx.ALL,5)
vboxPart.Add(sbsizerIm)
sbAna = wx.StaticBox(parttab, label="Movie analysis")
sbsizerAna = wx.StaticBoxSizer(sbAna, wx.VERTICAL)
sbsizerAna.Add(self.getTrajB, 0, wx.ALIGN_LEFT|wx.ALL,5)
sbsizerAna.Add(self.getCoordB, 0, wx.ALIGN_LEFT|wx.ALL,5)
sbsizerAna.Add(self.getBgB, 0, wx.ALIGN_LEFT|wx.ALL,5)
vboxPart.Add(sbsizerAna)
parttab.SetSizer(vboxPart)
self.nb.AddPage(parttab,'Particles')
vboxClusters=wx.BoxSizer(wx.VERTICAL)
vboxClusters.Add(openClMovB, 0, wx.ALIGN_LEFT|wx.ALL,5)
sbCl = wx.StaticBox(clustertab, label="Image display")
sbsizerCl = wx.StaticBoxSizer(sbCl, wx.VERTICAL)
for but in self.cImstate: sbsizerCl.Add(but, 0, wx.ALIGN_LEFT|wx.ALL,5)
vboxClusters.Add(sbsizerCl)
sbClAn = wx.StaticBox(clustertab, label="Movie analysis")
sbsizerClAn = wx.StaticBoxSizer(sbClAn, wx.VERTICAL)
sbsizerClAn.Add(self.getCluB, 0, wx.ALIGN_LEFT|wx.ALL,5)
sbsizerClAn.Add(self.convTrajCluB, 0, wx.ALIGN_LEFT|wx.ALL,5)
sbsizerClAn.Add(self.voroCheck, 0, wx.ALIGN_LEFT|wx.ALL,5)
sbsizerClAn.Add(self.clustNumCheck, 0, wx.ALIGN_LEFT|wx.ALL,5)
vboxClusters.Add(sbsizerClAn)
clustertab.SetSizer(vboxClusters)
self.nb.AddPage(clustertab,'Clusters')
vboxStack=wx.BoxSizer(wx.VERTICAL)
vboxStack.Add(openStackB, 0, wx.ALIGN_LEFT|wx.ALL,5)
vboxStack.Add(stCropLabel, 0, wx.ALIGN_LEFT|wx.ALL,5)
vboxStack.Add(stackResetCropB, 0, wx.ALIGN_LEFT|wx.ALL,5)
vboxStack.Add(self.stCropContr, 0, wx.ALIGN_LEFT|wx.ALL,5)
stacktab.SetSizer(vboxStack)
sbSt = wx.StaticBox(stacktab, label="Image display")
sbsizerSt = wx.StaticBoxSizer(sbSt, wx.VERTICAL)
for but in self.sImstate: sbsizerSt.Add(but, 0, wx.ALIGN_LEFT|wx.ALL,5)
vboxStack.Add(sbsizerSt)
vboxStack.Add(plotStackB, 0, wx.ALIGN_LEFT|wx.ALL,5)
vboxStack.Add(self.getStCoordB, 0, wx.ALIGN_LEFT|wx.ALL,5)
self.nb.AddPage(stacktab,'3D stack')
vboxNB.Add(self.nb)
nbPanel.SetSizer(vboxNB)
vboxRight.Add(nbPanel,0,wx.ALIGN_RIGHT)
hboxBig.Add(vboxLeft,1,wx.EXPAND)
hboxBig.Add(vboxRight,0,wx.ALIGN_RIGHT)
self.SetSizer(hboxBig)
#bind button/input events to class methods.
self.Bind(wx.EVT_BUTTON, self.SaveParas, id=100)
self.Bind(wx.EVT_BUTTON, self.ReadParasFromFile, id=101)
self.Bind(wx.EVT_BUTTON, self.OpenMovie, id=102)
self.Bind(wx.EVT_BUTTON, self.OpenClMovie, id=550)
self.Bind(wx.EVT_BUTTON, self.OpenStackMovie, id=650)
self.Bind(wx.EVT_BUTTON, self.ShowParas, id=103)
self.Bind(wx.EVT_BUTTON, self.ShowHistogram, id=104)
self.Bind(wx.EVT_BUTTON, self.GetTrajectories, id=105)
self.Bind(wx.EVT_BUTTON, self.GetCoordinates, id=106)
self.Bind(wx.EVT_BUTTON, self.GetBG, id=107)
self.Bind(wx.EVT_BUTTON, self.ExportImage, id=108)
# a number of controls handled by the same method.
self.Bind(wx.EVT_TEXT_ENTER, self.ReadParas, id=200)
self.Bind(wx.EVT_TEXT_ENTER, self.ReadParas, id=201)
self.Bind(wx.EVT_SLIDER, self.ReadParas, id=202)
self.Bind(wx.EVT_BUTTON, self.ReadParas, id=203)
self.Bind(wx.EVT_BUTTON, self.ReadParas, id=204)
self.Bind(wx.EVT_TEXT_ENTER, self.ReadParas, id=205)
self.Bind(wx.EVT_TEXT_ENTER, self.ReadParas, id=206)
self.Bind(wx.EVT_COMBOBOX, self.ReadParas, id=207)
self.Bind(wx.EVT_TEXT_ENTER, self.ReadParas, id=208)
self.Bind(wx.EVT_TEXT_ENTER, self.ReadParas, id=209)
self.Bind(wx.EVT_TEXT_ENTER, self.ReadParas, id=210)
self.Bind(wx.EVT_CHECKBOX, self.ReadParas, id=211)
self.Bind(wx.EVT_CHECKBOX, self.ReadParas, id=212)
self.Bind(wx.EVT_CHECKBOX, self.ReadParas, id=213)
self.Bind(wx.EVT_CHECKBOX, self.ReadParas, id=214)
self.Bind(wx.EVT_TEXT_ENTER, self.ReadParas, id=651)
        # bindings for the remaining analysis buttons and the image-display radio button groups
self.Bind(wx.EVT_BUTTON, self.GetClusters, id=501)
self.Bind(wx.EVT_BUTTON, self.ConvClustTraj, id=502)
self.Bind(wx.EVT_BUTTON, self.ResetCrop, id=652)
self.Bind(wx.EVT_BUTTON, self.GetCoordinates, id=653)
self.Bind(wx.EVT_BUTTON, self.PlotStack, id=654)
for i in range(300,307): self.Bind(wx.EVT_RADIOBUTTON, self.ImgDisplay, id=i)
for i in range(521,527): self.Bind(wx.EVT_RADIOBUTTON, self.ClImgDisplay, id=i)
for i in range(621,623): self.Bind(wx.EVT_RADIOBUTTON, self.StImgDisplay, id=i)
self.Bind(wx.EVT_COMBOBOX, self.Zoom, id=400)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.Bind(wx.EVT_CHAR_HOOK,self.OnKeyDown) #Handles all key presses!
#parameters and definitions. We store everything as attributes of the main window.
self.imType='Original'
self.images={'Original':'','Single channel':'','Background':'', 'BG treated':'', 'Mask':'','Threshold':'', 'Particles':''}
self.moviefile=''
self.movie=rt.nomovie(moviedir+'/')
self.framenum=0
self.parameters={
'framerate':0.,'sphericity':-1.0,'xscale':1.0,'yscale':1.0,'zscale':1.0,
'imsize':(0,0),'blobsize':(5,90),'crop':[0]*4, 'framelim':(0,0), 'circle':[0,0,1e4],
'frames':0, 'threshold':120, 'struct':5, 'channel':0, 'blur':1,'spacing':1, 'imgspacing':-1,'maxdist':-1,'lossmargin':10, 'lenlim':1,
'sizepreview':True, 'invert':False, 'diskfit':False, 'mask':True
}
#erosion/dilation kernel. basically, a circle of radius "struct" as a numpy array.
if self.parameters['struct']>0: self.kernel= cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(self.parameters['struct'],self.parameters['struct']))
else: self.kernel=False
self.strContr.SetValue(str(self.parameters['struct']))
self.threshContr.SetValue(str(self.parameters['threshold']))
self.psizeContr.SetValue(str(self.parameters['blobsize'])[1:-1])
self.frameMinMaxContr.SetValue("%d,%d"%self.parameters['framelim'])
self.frameSpacContr.SetValue("%d"%(self.parameters['spacing']))
self.blurContr.SetValue(str(self.parameters['blur']))
self.sizeCheck.SetValue(self.parameters['sizepreview'])
self.maskCheck.SetValue(self.parameters['mask'])
self.invCheck.SetValue(self.parameters['invert'])
self.diskfitCheck.SetValue(self.parameters['diskfit'])
self.cdir=moviedir
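    # Ctrl+Left / Ctrl+Right step one frame backwards / forwards through the movie.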
def OnKeyDown(self,event):
key=event.GetKeyCode()
ctrlstate=wx.GetKeyState(wx.WXK_CONTROL)
if ctrlstate:
if key==wx.WXK_LEFT:
event.SetId(-1)
self.framenum-=1
self.ReadParas(event)
if key==wx.WXK_RIGHT:
event.SetId(-1)
self.framenum+=1
self.ReadParas(event)
else: event.Skip()
    def SaveParas(self,event):
        """Saves the current parameter set to 'paras.txt' in the movie's data directory."""
if self.movie.typ!='none':
self.ShowParas()
if not os.path.exists(self.movie.datadir): os.mkdir(self.movie.datadir)
try:
with open(self.movie.datadir+'paras.txt','w') as f: f.write(self.infoWin.infotext)
except:
print "Unexpected error:", exc_info()[0]
pass
def OpenMovie(self,event=None):
dlg = wx.FileDialog(self, "Choose image", self.cdir, style=wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
self.moviefile=dlg.GetPath()
print self.moviefile
self.cdir=os.path.dirname(dlg.GetPath())+os.sep
print self.cdir
if os.name=='posix': #this assumes you installed mplayer!
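            # persist the last-used directory by rewriting the moviedir='...' default in this script's own source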
with open(os.path.abspath(__file__), 'r') as f:
text=f.read()
text=re.sub("(?<=\nmoviedir=').*?(?='#end)",self.cdir,text)
with open(os.path.abspath(__file__), 'w') as f:
f.write(text)
#print ' '.join(['mplayer','-vo','null','-ao','null','-identify','-frames','0',self.moviefile])
result = subprocess.check_output(['mplayer','-vo','null','-ao','null','-identify','-frames','0',self.moviefile])
if os.name=='nt': #this assumes you installed mplayer and have the folder in your PATH!
result = subprocess.check_output(['mplayer.exe','-vo','null','-ao', 'null','-identify','-frames','0',self.moviefile])
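            # parse frame size, frame rate and total frame count from mplayer's -identify output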
try:
self.parameters['imsize']=(int(re.search('(?<=ID_VIDEO_WIDTH=)[0-9]+',result).group()),int(re.search('(?<=ID_VIDEO_HEIGHT=)[0-9]+',result).group()))
self.parameters['framerate']=float(re.search('(?<=ID_VIDEO_FPS=)[0-9.]+',result).group())
self.parameters['frames']=int(round(float(re.search('(?<=ID_LENGTH=)[0-9.]+',result).group())*self.parameters['framerate']))
self.parameters['framelim']=(0,self.parameters['frames'])
except:
self.parameters['imsize']=(0,0)
self.parameters['framerate']=1.
self.parameters['frames']=0
self.parameters['framelim']=(0,0)
self.movie=rt.movie(self.moviefile)
self.images['Original']=self.movie.getFrame(self.framenum)
self.frameSldr.SetMin(0)
self.frameSldr.SetMax(self.parameters['frames'])
self.frameSldr.SetValue(0)
self.frameContr.SetValue('0')
self.framenum=0
self.zoomBox.SetValue('100%')
self.scp.im.scale=1.
image=self.images['Original']
if type(image).__name__=='ndarray':
im = wx.EmptyImage(image.shape[1],image.shape[0])
im.SetData(image.tostring())
elif type(image).__name__=='str':
im=wx.Image(image)
else:
                im=wx.EmptyImage(self.parameters['imsize'][0],self.parameters['imsize'][1]) # blank placeholder so the Rescale/SetBitmap calls below still work
im.Rescale(im.GetSize()[0]*self.scp.im.scale,im.GetSize()[1]*self.scp.im.scale)
#ds=wx.GetDisplaySize()
ds=wx.Display(0).GetGeometry().GetSize()
ws=(im.GetSize()[0]+120,im.GetSize()[1]+300)
if ws[0]<ds[0] and ws[1]<ds[1]:
winsize=ws
else:
winsize=ds
self.SetSize(winsize)
self.scp.im.SetBitmap(wx.BitmapFromImage(im))
self.scp.im.points=[]
self.scp.im.axes=[]
self.frameMinMaxContr.SetValue("%d,%d"%self.parameters['framelim'])
if os.path.exists(self.movie.datadir+'paras.txt'): self.ReadParasFromFile(filename=self.movie.datadir+'paras.txt')
f = self.sb.GetFont()
dc = wx.WindowDC(self.sb)
dc.SetFont(f)
width, height = dc.GetTextExtent(self.moviefile)
self.sb.SetStatusWidths([winsize[0]-width-50, width+40])
self.sb.SetStatusText(self.moviefile, 1)
def OpenStackMovie(self,event=None):
dlg = wx.FileDialog(self, "Select image", self.cdir, style=wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
self.moviefile=dlg.GetPath()
self.cdir=os.path.dirname(dlg.GetPath())+os.sep
print self.cdir
if os.name=='posix':
with open(os.path.abspath(__file__), 'r') as f:
text=f.read()
text=re.sub("(?<=\nmoviedir=').*?(?='#end)",self.cdir,text)
with open(os.path.abspath(__file__), 'w') as f:
f.write(text)
self.movie=rt.imStack(self.moviefile)
self.images['Original']=cv2.imread(self.movie.stack[0],1)
self.parameters=self.movie.parameters
self.frameSldr.SetMin(0)
self.frameSldr.SetMax(self.parameters['frames'])
self.frameSldr.SetValue(0)
self.frameContr.SetValue('0')
self.stCropContr.SetValue(str(self.movie.parameters['crop'])[1:-1])
self.framenum=0
self.zoomBox.SetValue('100%')
self.scp.im.scale=1.
image=self.images['Original']
if type(image).__name__=='ndarray':
im = wx.EmptyImage(image.shape[1],image.shape[0])
im.SetData(image.tostring())
if type(image).__name__=='str':
im=wx.Image(image)
im.Rescale(im.GetSize()[0]*self.scp.im.scale,im.GetSize()[1]*self.scp.im.scale)
#ds=wx.GetDisplaySize()
ds=wx.Display(0).GetGeometry().GetSize()
ws=(im.GetSize()[0]+120,im.GetSize()[1]+200)
if ws[0]<ds[0] and ws[1]<ds[1]:
winsize=ws
else:
winsize=ds
self.SetSize(winsize)
self.scp.im.SetBitmap(wx.BitmapFromImage(im))
self.scp.im.points=[]
self.scp.im.axes=[]
self.frameMinMaxContr.SetValue("%d,%d"%self.parameters['framelim'])
if os.path.exists(self.movie.datadir+'paras.txt'): self.ReadParasFromFile(filename=self.movie.datadir+'paras.txt')
f = self.sb.GetFont()
dc = wx.WindowDC(self.sb)
dc.SetFont(f)
width, height = dc.GetTextExtent(self.moviefile)
self.sb.SetStatusWidths([winsize[0]-width-50, width+40])
self.sb.SetStatusText(self.moviefile, 1)
def OpenClMovie(self,event=None):
dlg = wx.FileDialog(self, "Choose image", self.cdir, style=wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
self.moviefile=dlg.GetPath()
self.cdir=os.path.dirname(dlg.GetPath())+os.sep
print self.cdir
if os.name=='posix': #this assumes you installed mplayer!
with open(os.path.abspath(__file__), 'r') as f:
text=f.read()
text=re.sub("(?<=\nmoviedir=').*?(?='#end)",self.cdir,text)
with open(os.path.abspath(__file__), 'w') as f:
f.write(text)
#print ' '.join(['mplayer','-vo','null','-ao','null','-identify','-frames','0',self.moviefile])
result = subprocess.check_output(['mplayer','-vo','null','-ao','null','-identify','-frames','0',self.moviefile])
if os.name=='nt': #this assumes you installed mplayer and have the folder in your PATH!
result = subprocess.check_output(['mplayer.exe','-vo','null','-ao', 'null','-identify','-frames','0',self.moviefile])
self.parameters['imsize']=(int(re.search('(?<=ID_VIDEO_WIDTH=)[0-9]+',result).group()),int(re.search('(?<=ID_VIDEO_HEIGHT=)[0-9]+',result).group()))
self.parameters['framerate']=float(re.search('(?<=ID_VIDEO_FPS=)[0-9.]+',result).group())
self.parameters['frames']=int(round(float(re.search('(?<=ID_LENGTH=)[0-9.]+',result).group())*self.parameters['framerate']))
self.parameters['framelim']=(0,self.parameters['frames'])
self.frameSldr.SetMin(0)
self.frameSldr.SetMax(self.parameters['frames'])
self.frameSldr.SetValue(0)
self.frameContr.SetValue('0')
self.framenum=0
self.movie=rt.clusterMovie(self.moviefile)
self.images['Original']=self.movie.getFrame(self.framenum)
self.zoomBox.SetValue('100%')
self.scp.im.scale=1.
image=self.images['Original']
if type(image).__name__=='ndarray':
im = wx.EmptyImage(image.shape[1],image.shape[0])
im.SetData(image.tostring())
if type(image).__name__=='str':
im=wx.Image(image)
im.Rescale(im.GetSize()[0]*self.scp.im.scale,im.GetSize()[1]*self.scp.im.scale)
#ds=wx.GetDisplaySize()
ds=wx.Display(0).GetGeometry().GetSize()
ws=(im.GetSize()[0]+120,im.GetSize()[1]+200)
if ws[0]<ds[0] and ws[1]<ds[1]:
winsize=ws
else:
winsize=ds
self.SetSize(winsize)
self.scp.im.SetBitmap(wx.BitmapFromImage(im))
self.scp.im.points=[]
self.scp.im.axes=[]
self.frameMinMaxContr.SetValue("%d,%d"%self.parameters['framelim'])
if os.path.exists(self.movie.datadir+'paras.txt'): self.ReadParasFromFile(filename=self.movie.datadir+'paras.txt')
f = self.sb.GetFont()
dc = wx.WindowDC(self.sb)
dc.SetFont(f)
width, height = dc.GetTextExtent(self.moviefile)
self.sb.SetStatusWidths([winsize[0]-width-50, width+40])
self.sb.SetStatusText(self.moviefile, 1)
def ShowParas(self,event=None, text=''):
try:
self.infoWin.Update(text)
except AttributeError:
self.infoWin=InfoWin(self)
self.infoWin.Show()
self.infoWin.Update(text)
self.infoWin.Raise()
def ShowHistogram(self,event):
try:
self.HistoWin.Update(self, self.images[self.imType])
except AttributeError:
self.HistoWin=HistoWin(self, self.images[self.imType])
self.HistoWin.Show()
self.HistoWin.Raise()
def Zoom(self,event=None):
try:
sc=float(self.zoomBox.GetValue()[:-1])/100.
self.scp.im.scale=sc
image=self.images[self.imType]
if type(image).__name__=='ndarray':
                im = wx.EmptyImage(image.shape[1],image.shape[0]) # wx.EmptyImage takes (width, height); ndarray shape is (height, width)
im.SetData(image.tostring())
if type(image).__name__=='str':
im=wx.Image(image)
#ds=wx.GetDisplaySize()
ds=wx.Display(0).GetGeometry().GetSize()
ws=(int(im.GetSize()[0]*sc+300),int(im.GetSize()[1]*sc+200))
self.SetSize((min(ws[0],ds[0]),min(ws[1],ds[1])))
im.Rescale(im.GetSize()[0]*sc,im.GetSize()[1]*sc)
self.scp.im.SetBitmap(wx.BitmapFromImage(im))
self.scp.im.Redraw()
except ValueError:
self.zoomBox.SetValue('{0:d}%'.format(int(self.scp.im.scale*100)))
def ImgDisplay(self,event=None):
if self.movie.typ!="none":
self.parameters['channel']=int(self.channelCB.GetValue())
if self.maskCheck.GetValue():
try:
im=np.array(Image.open(self.movie.datadir+'mask.png'))
if len(im.shape)==3: im=im[:,:,self.parameters['channel']]
mask=(im>0).astype(float)
except: mask=np.zeros(self.movie.parameters['imsize'][::-1])+1.
else: mask=np.zeros(self.movie.parameters['imsize'][::-1])+1.
self.images['Mask']=mask.astype(np.uint8)*255
for item in self.rImstate:
if item.GetValue(): self.imType=item.GetLabelText()
if self.imType=='Background':
if type(self.movie.bg).__name__!='ndarray':
if os.path.exists(self.movie.datadir+'bg.png'):
self.movie.loadBG()
else:
self.sb.SetStatusText('Working... Extracting background', 1)
s=self.BGrngContr.GetValue() #text field
BGrng=(int(s.split(',')[0]),int(s.split(',')[1]))
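                        # number of frames sampled for the background estimate: scales inversely with image area, clamped to [10, 50]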
num=int(1.e8/(self.movie.parameters['imsize'][0]*self.movie.parameters['imsize'][1]))
if num<10: num=10
if num>50: num=50
print num
if BGrng[1]<0: bg=self.movie.getBGold(cutoff=BGrng[1], num=num, spac=int(self.parameters['frames']/(num+1)), prerun=30, save=True,channel=self.parameters['channel'])
else: bg=self.movie.getBG(rng=BGrng, num=num, spac=int(self.parameters['frames']/(num+1)), prerun=30, save=True,channel=self.parameters['channel'])
self.sb.SetStatusText(self.moviefile, 1)
self.images['Background']=self.movie.bg
else:
if type(self.images['Original']).__name__!='ndarray':
image=self.movie.getFrame(self.framenum)
self.images['Original']=image.copy()
else: image=self.images['Original'].copy()
if len(image.shape)>2:
image=image[:,:,self.parameters['channel']]
self.images['Single channel'] = image.copy()
bgsub=image.astype(float)-self.movie.bg
self.images['BG treated']=rt.mxContr(bgsub)*mask+255*(1-mask)
thresh=rt.mxContr((self.images['BG treated']<self.parameters['threshold']).astype(int))
if self.parameters['struct']>0:
thresh=cv2.morphologyEx(thresh, cv2.MORPH_OPEN, self.kernel)
if self.invCheck.GetValue(): thresh=255-thresh
self.images['Threshold']=thresh.copy()
#contours, hierarchy=cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
self.images['Particles']=self.images['Original'].copy()
if self.imType=='Particles':
blobs,contours=rt.extract_blobs(thresh, -1, self.parameters['blobsize'], self.parameters['sphericity'], diskfit=True,returnCont=True, outpSpac=1) #TODO:why is diskfit hardcoded to True?
for b in range(len(blobs)):
if blobs[b][-2]==0:
if self.diskfitCheck.GetValue(): cv2.circle(self.images['Particles'],(np.int32(blobs[b][3]),np.int32(blobs[b][4])),np.int32(np.sqrt(blobs[b][2]/np.pi)),(255,120,0),2)
else:
print contours[b]
cv2.drawContours(self.images['Particles'],[contours[b]],-1,(0,255,120),2)
else:
if self.diskfitCheck.GetValue(): cv2.circle(self.images['Particles'],(np.int32(blobs[b][3]),np.int32(blobs[b][4])),np.int32(np.sqrt(blobs[b][2]/np.pi)),(0,255,120),2)
else:
print contours[b]
cv2.drawContours(self.images['Particles'],[contours[b]],-1,(0,255,120),2)
contcount=blobs.shape[0]
if self.sizeCheck.GetValue():
r1=np.ceil(np.sqrt(self.parameters['blobsize'][0]/np.pi))
r2=np.ceil(np.sqrt(self.parameters['blobsize'][1]/np.pi))
cv2.circle(self.images['Particles'],(np.int32(r2+5),np.int32(r2+5)),np.int32(r1),(255,0,0),-1)
cv2.circle(self.images['Particles'],(np.int32(3*r2+10),np.int32(r2+5)),np.int32(r2),(255,0,0),-1)
self.sb.SetStatusText("%d particles"%contcount, 0)
self.scp.im.Redraw()
try: self.HistoWin.Update(self, self.images[self.imType])
except AttributeError: pass
def ClImgDisplay(self,event=None):
if self.movie.typ!='none':
self.parameters['channel']=int(self.channelCB.GetValue())
if self.maskCheck.GetValue():
try:
im=np.array(Image.open(self.movie.datadir+'mask.png'))
if len(im.shape)==3: im=im[:,:,self.parameters['channel']]
mask=(im>0).astype(float)
if self.parameters['circle'][0]==0:
th=im.copy().astype(np.uint8)
contours, hierarchy=cv2.findContours(th,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
(xm,ym),rm=cv2.minEnclosingCircle(contours[0])
self.parameters['circle']=[xm,self.parameters['imsize'][1]-ym,rm]
print self.parameters['circle']
except:
mask=np.zeros(self.movie.parameters['imsize'][::-1])+1.
else:
mask=np.zeros(self.movie.parameters['imsize'][::-1])+1.
self.images['Mask']=mask.astype(np.uint8)*255
for item in self.cImstate:
if item.GetValue(): self.imType=item.GetLabelText()
if type(self.images['Original']).__name__!='ndarray':
image=self.movie.getFrame(self.framenum)
self.images['Original']=image.copy()
else: image=self.images['Original'].copy()
if len(image.shape)>2:
image=image[:,:,self.parameters['channel']]
self.images['Single channel'] = image.copy()
blur=rt.mxContr(image.copy())*mask+255*(1-mask)
blur=cv2.GaussianBlur(blur,(self.parameters['blur'],self.parameters['blur']),0)
self.images['Blur']=blur.copy()
thresh=rt.mxContr((blur<self.parameters['threshold']).astype(int))
if self.invCheck.GetValue(): thresh=255-thresh
if self.parameters['struct']>0:
thresh=cv2.morphologyEx(thresh, cv2.MORPH_OPEN, self.kernel)
if self.invCheck.GetValue(): thresh=255-thresh
self.images['Threshold']=thresh.copy()
self.images['Clusters']=self.images['Original'].copy()
self.images['Voronoi']=self.images['Original'].copy()
if self.imType=='Clusters':
blobs,contours=rt.extract_blobs(thresh, -1, self.parameters['blobsize'], -1, diskfit=False,returnCont=True, outpSpac=1)
count = 0
contcount=blobs.shape[0]
if self.clustNumCheck.GetValue() and contcount>0:
self.ShowParas(text=str(blobs[:,1:]))
for b in range(len(blobs)):
if self.clustNumCheck.GetValue():
cv2.putText(self.images['Clusters'],str(count), (int(blobs[count,3]),int(blobs[count,4])), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0),2)
count +=1
cv2.drawContours(self.images['Clusters'],[contours[b]],-1,(0,255,120),2)
contcount=blobs.shape[0]
self.sb.SetStatusText("%d clusters"%contcount, 0)
if self.sizeCheck.GetValue():
r1=np.ceil(np.sqrt(self.parameters['blobsize'][0]/np.pi))
r2=np.ceil(np.sqrt(self.parameters['blobsize'][1]/np.pi))
cv2.circle(self.images['Clusters'],(np.int32(r2+5),np.int32(r2+5)),np.int32(r1),(255,0,0),-1)
cv2.circle(self.images['Clusters'],(np.int32(3*r2+10),np.int32(r2+5)),np.int32(r2),(255,0,0),-1)
if self.imType=='Voronoi' and rt.vorflag:
blobs=rt.extract_blobs(thresh, -1, self.parameters['blobsize'], -1, diskfit=False,returnCont=False, outpSpac=1)
if blobs.shape[0]>1:
newpoints=[]
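                    # bound the Voronoi cells by the circular arena: mirror each blob centre across the circle (rt.circle_invert) and tessellate the combined point set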
vor=rt.Voronoi(blobs[:,3:5])
circ=self.parameters['circle']
dists=np.sum((vor.vertices-np.array(circ[:2]))**2,axis=1)-circ[2]**2
extinds=[-1]+(dists>0).nonzero()[0]
for i in range(blobs.shape[0]):
r=vor.regions[vor.point_region[i]]
newpoints+=[rt.circle_invert(blobs[i,3:5],circ, integ=True)]
pts=np.vstack((blobs[:,3:5],np.array(newpoints)))
vor=rt.Voronoi(pts)
for i in range(blobs.shape[0]):
r=vor.regions[vor.point_region[i]]
col=tuple([int(255*c) for c in cm.jet(i*255/blobs.shape[0])])[:3]
cv2.polylines(self.images['Voronoi'], [(vor.vertices[r]).astype(np.int32)], True, col[:3], 2)
cv2.circle(self.images['Voronoi'], (int(circ[0]),int(circ[1])), int(circ[2]),(0,0,255),2)
self.scp.im.Redraw()
try: self.HistoWin.Update(self, self.images[self.imType])
except AttributeError: pass
def StImgDisplay(self,event=None):
if self.movie.typ=='3D stack':
self.parameters['channel']=int(self.channelCB.GetValue())
for item in self.sImstate:
if item.GetValue(): self.imType=item.GetLabelText()
image=self.movie.getFrame(self.framenum)
#print self.movie.crop
if type(image).__name__=='ndarray':
if image.shape[:2]!=(self.movie.parameters['crop'][2]-self.movie.parameters['crop'][0],self.movie.parameters['crop'][3]-self.movie.parameters['crop'][1]):
if len(image.shape)==2: image=image[self.movie.parameters['crop'][0]:self.movie.parameters['crop'][2],self.movie.parameters['crop'][1]:self.movie.parameters['crop'][3]]
if len(image.shape)==3: image=image[self.movie.parameters['crop'][0]:self.movie.parameters['crop'][2],self.movie.parameters['crop'][1]:self.movie.parameters['crop'][3],:]
self.images['Original']=image.copy()
if len(image.shape)>2:
image=image[:,:,self.parameters['channel']]
print image.shape, len(image.shape)
if self.parameters['blur']>1:
image=cv2.GaussianBlur(image,(self.parameters['blur'],self.parameters['blur']),0)
self.images['Single channel'] = image.copy()
thresh=rt.mxContr((self.images['Single channel']<self.parameters['threshold']).astype(int))
if self.parameters['struct']>0:
thresh=cv2.morphologyEx(thresh, cv2.MORPH_OPEN, self.kernel)
if self.invCheck.GetValue(): thresh=255-thresh
self.images['Threshold']=thresh.copy()
self.images['Particles']=self.images['Original'].copy()
if self.imType=='Particles':
if np.amin(thresh)!=np.amax(thresh): blobs,contours=rt.extract_blobs(thresh, -1, self.parameters['blobsize'], self.parameters['sphericity'], diskfit=True,returnCont=True, outpSpac=1)
else: blobs,contours=np.array([]).reshape(0,8),[]
for b in range(len(blobs)):
if blobs[b][-2]==0:
if self.diskfitCheck.GetValue():
cv2.circle(self.images['Particles'],(np.int32(blobs[b][3]),np.int32(blobs[b][4])),np.int32(np.sqrt(blobs[b][2]/np.pi)),(255,120,0),2)
else:
#print contours[b]
cv2.drawContours(self.images['Particles'],[contours[b]],-1,(0,255,120),2)
else:
if self.diskfitCheck.GetValue():
cv2.circle(self.images['Particles'],(np.int32(blobs[b][3]),np.int32(blobs[b][4])),np.int32(np.sqrt(blobs[b][2]/np.pi)),(0,255,120),2)
else:
#print contours[b]
cv2.drawContours(self.images['Particles'],[contours[b]],-1,(0,255,120),2)
contcount=blobs.shape[0]
if self.sizeCheck.GetValue():
r1=np.ceil(np.sqrt(self.parameters['blobsize'][0]/np.pi))
r2=np.ceil(np.sqrt(self.parameters['blobsize'][1]/np.pi))
cv2.circle(self.images['Particles'],(np.int32(r2+5),np.int32(r2+5)),np.int32(r1),(255,0,0),-1)
cv2.circle(self.images['Particles'],(np.int32(3*r2+10),np.int32(r2+5)),np.int32(r2),(255,0,0),-1)
self.sb.SetStatusText("%d particles"%contcount, 0)
self.scp.im.Redraw()
try: self.HistoWin.Update(self, self.images[self.imType])
except AttributeError: pass
def ExportImage(self,event):
dlg = wx.FileDialog(self, "Export current image to PNG", self.cdir, "",
"PNG files (*.png)|*.png", wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if dlg.ShowModal() == wx.ID_OK:
Image.fromarray(self.images[self.imType]).convert('RGB').save(dlg.GetPath())
def ReadParas(self,event):
evID=event.GetId()
if evID==200: #binary threshold
thresh=int(self.threshContr.GetValue())
if 0<=thresh<=255: self.parameters['threshold']=thresh
if evID==201: #structuring element size
self.parameters['struct']=int(self.strContr.GetValue())
if self.parameters['struct']>0: self.kernel= cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(self.parameters['struct'],self.parameters['struct']))
else: self.kernel=False
#frame number
if evID==202: self.framenum=self.frameSldr.GetValue() #slider
if evID==203: self.framenum+=1 #fwd Button
if evID==204: self.framenum-=1 #back Button
if evID==205: self.framenum=int(self.frameContr.GetValue()) #text field
if evID==206:
s=self.psizeContr.GetValue() #text field
self.parameters['blobsize']=(int(s.split(',')[0]),int(s.split(',')[1]))
if evID==207: self.parameters['channel']=int(self.channelCB.GetValue())
if evID==208: self.parameters['sphericity']=float(self.sphericityContr.GetValue())
if evID==209:
self.parameters['blur']=int(self.blurContr.GetValue())
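            # OpenCV's GaussianBlur requires an odd kernel size, so bump even values up by one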
if self.parameters['blur']%2==0: self.parameters['blur']+=1
if evID==210:
self.parameters['spacing']=int(self.frameSpacContr.GetValue())
if evID in range(211,215):
self.parameters['sizepreview']=self.sizeCheck.GetValue()
self.parameters['mask']=self.maskCheck.GetValue()
self.parameters['invert']=self.invCheck.GetValue()
self.parameters['diskfit']=self.diskfitCheck.GetValue()
if evID==651:
if self.movie.typ=="3D stack":
try:
self.movie.parameters['crop']=[int(i) for i in self.stCropContr.GetValue().split(',')]
self.parameters['crop']=self.movie.parameters['crop']
except: raise
self.frameContr.SetValue(str(self.framenum))
self.frameSpacContr.SetValue(str(self.parameters['spacing']))
self.frameSldr.SetValue(self.framenum)
self.strContr.SetValue(str(self.parameters['struct']))
self.threshContr.SetValue(str(self.parameters['threshold']))
self.sphericityContr.SetValue(str(self.parameters['sphericity']))
self.blurContr.SetValue(str(self.parameters['blur']))
if self.movie.typ=="3D stack": self.stCropContr.SetValue(str(self.movie.parameters['crop'])[1:-1])
self.images['Original']=self.movie.getFrame(self.framenum)
try: self.infoWin.Update()
except AttributeError:
self.infoWin=InfoWin(self)
self.infoWin.Show()
self.infoWin.Update()
if evID>0: self.infoWin.Raise()
text=self.nb.GetPageText(self.nb.GetSelection())
if text=='Particles':
self.ImgDisplay()
elif text=='Clusters':
self.ClImgDisplay()
elif text=='3D stack':
self.StImgDisplay()
#self.sSaveProject()
def ReadParasFromFile(self,event=None, filename=''):
if os.path.exists(self.movie.datadir): d=self.movie.datadir
else: d=''
if filename=='':
            dlg = wx.FileDialog(self, "Choose parameter file", d, 'paras.txt',style=wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
filename=dlg.GetPath()
try:
with open(filename,'r') as f: text=f.read()
text=text.split('\n')
for t in text:
t=t.split(':')
if t[0].strip() in ['struct','threshold','frames', 'channel','blur','spacing','imgspacing','maxdist','lossmargin','lenlim']:#integer parameters
self.parameters[t[0]]=int(t[1].strip())
if t[0].strip() in ['blobsize','imsize', 'crop', 'framelim','circle']:#tuple parameters
tsplit=re.sub('[\s\[\]\(\)]','',t[1]).split(',')
self.parameters[t[0]]=tuple([int(float(it)) for it in tsplit]) #this is a bit of a hack, but strings with dots in them don't convert to int, apparently
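                    # (e.g. int('5.0') raises ValueError, while int(float('5.0')) == 5)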
if t[0].strip() in ['framerate','sphericity','xscale','yscale','zscale']:#float parameters
self.parameters[t[0]]=float(t[1].strip())
if t[0].strip() == 'channel':
self.channelCB.SetValue(t[1].strip())
if t[0].strip() in ['sizepreview','mask','diskfit','invert']:#float parameters
self.parameters[t[0]]=rt.str_to_bool(t[1].strip())
self.strContr.SetValue(str(self.parameters['struct']))
if self.parameters['struct']>0: self.kernel= cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(self.parameters['struct'],self.parameters['struct']))
else: self.kernel=False
self.threshContr.SetValue(str(self.parameters['threshold']))
self.psizeContr.SetValue(str(self.parameters['blobsize']).replace(' ','')[1:-1])
self.sphericityContr.SetValue("%.2f"%self.parameters['sphericity'])
self.blurContr.SetValue("%d"%self.parameters['blur'])
self.frameSpacContr.SetValue("%d"%self.parameters['spacing'])
self.stCropContr.SetValue(str(self.parameters['crop']).replace(' ','')[1:-1])
self.maskCheck.SetValue(self.parameters['mask'])
self.diskfitCheck.SetValue(self.parameters['diskfit'])
self.sizeCheck.SetValue(self.parameters['sizepreview'])
self.invCheck.SetValue(self.parameters['invert'])
self.ShowParas()
except:
print "Ooops... Try a different file?"
self.sb.SetStatusText("Ooops... Try a different file?",0)
raise
def GetCoordinates(self,event):
self.getCoordB.Disable()
self.sb.SetStatusText("Working... Running coordinate analysis",1)
try:
lim=self.frameMinMaxContr.GetValue()
self.parameters['framelim']=(int(lim.split(',')[0]),int(lim.split(',')[1]))
except:
pass
if self.maskCheck.GetValue():
try: mask=self.movie.datadir+'mask.png'
except: mask=False
else: mask=False
self.parameters['channel']=int(self.channelCB.GetValue())
self.movie.extractCoords(framelim=self.parameters['framelim'], blobsize=self.parameters['blobsize'], threshold=self.parameters['threshold'],kernel=self.kernel, delete=True, mask=mask,channel=self.parameters['channel'], sphericity=self.parameters['sphericity'],diskfit=self.diskfitCheck.GetValue(), crop=self.parameters['crop'], invert=self.invCheck.GetValue())
self.sb.SetStatusText(self.moviefile, 1)
self.getCoordB.Enable()
def GetClusters(self,event):
print self.parameters
self.getCluB.Disable()
self.sb.SetStatusText("Working... Running coordinate analysis",1)
try:
lim=self.frameMinMaxContr.GetValue()
self.parameters['framelim']=(int(lim.split(',')[0]),int(lim.split(',')[1]))
except:
pass
if self.maskCheck.GetValue(): mask=self.movie.datadir+'mask.png'
else: mask=False
self.parameters['channel']=int(self.channelCB.GetValue())
self.movie.getClusters(thresh=self.parameters['threshold'],gkern=self.parameters['blur'],clsize=self.parameters['blobsize'],channel=self.parameters['channel'],rng=self.parameters['framelim'],spacing=self.parameters['spacing'], maskfile=self.movie.datadir+'mask.png', circ=self.parameters['circle'],imgspacing=self.parameters['imgspacing'])
self.sb.SetStatusText(self.moviefile, 1)
self.getCluB.Enable()
def ConvClustTraj(self,event):
if os.path.exists(self.movie.datadir+'clusters.txt'):
datafile=self.movie.datadir+"clusters.txt"
else:
dlg = wx.FileDialog(self, "Select cluster data file", self.cdir, style=wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
datafile=dlg.GetPath()
else:
datafile=""
if datafile!="":
self.convTrajCluB.Disable()
self.sb.SetStatusText("Working... Extracting trajectories from coordinates.",1)
self.movie.CoordtoTraj(tempfile=datafile, lossmargin=1,maxdist=self.parameters['maxdist'],spacing=self.parameters['spacing'])
self.sb.SetStatusText(self.moviefile, 1)
self.convTrajCluB.Enable()
def GetTrajectories(self,event):
self.getTrajB.Disable()
self.sb.SetStatusText("Working... Running trajectory analysis",1)
dlg = wx.FileDialog(self, "Choose coordinate file", self.movie.datadir, style=wx.OPEN)
flag=False
fname=''
if dlg.ShowModal() == wx.ID_OK:
fname=dlg.GetPath()
with open(fname,'r') as f:
fileheader=f.readline()
if rt.COORDHEADER==fileheader: flag=True
else:
wx.MessageBox('Please provide/generate a coordinate file (via Get Coordinates).', 'Info', wx.OK | wx.ICON_INFORMATION)
if flag:
self.movie.CoordtoTraj(tempfile=fname,lenlim=self.parameters['lenlim'], delete=True, breakind=1e9, maxdist=self.parameters['maxdist'], lossmargin=self.parameters['lossmargin'], spacing=self.parameters['spacing'])
else:
wx.MessageBox('Please provide/generate a coordinate file (via Get Coordinates).', 'Info', wx.OK | wx.ICON_INFORMATION)
self.getTrajB.Enable()
def GetBG(self,event):
s=self.BGrngContr.GetValue() #text field
BGrng=(int(s.split(',')[0]),int(s.split(',')[1]))
self.parameters['channel']=int(self.channelCB.GetValue())
if BGrng[1]<0: bg=self.movie.getBGold(cutoff=BGrng[0], num=40, spac=int(self.parameters['frames']/51), prerun=100, save=True,channel=self.parameters['channel'])
else: bg=self.movie.getBG(rng=BGrng, num=40, spac=int(self.parameters['frames']/51), prerun=100, save=True,channel=self.parameters['channel'])
def ResetCrop(self,event):
if self.movie.typ=='3D stack':
self.movie.parameters['crop']=[0,0,self.movie.parameters['imsize'][0],self.movie.parameters['imsize'][1]]
self.parameters['crop']=self.movie.parameters['crop']
self.stCropContr.SetValue(str(self.movie.parameters['crop'])[1:-1])
self.StImgDisplay()
def PlotStack(self,event):
self.stackwin=StackWin(self)
self.stackwin.Show()
self.stackwin.Raise()
def OnClose(self,event):
self.Destroy()
app=wx.App(redirect=False)
frame=MyFrame()
frame.Show()
app.MainLoop()
app.Destroy()
| gpl-2.0 |
googleinterns/debaised-analysis | intents/oversights/test_calendar_vs_experience_time.py | 1 | 3409 | """
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
This module contains tests for the 'calendar vs. experience time' oversight
in time series.
"""
import sys
sys.path.append(".")
import pandas
import calendar_vs_experience_time
from util.enums import SummaryOperators
from util.enums import Filters
def test_1():
"""
    query : compare average sales of A and B in the date range 2000 to 2010.
    Here the oversight is detected because the two companies differ in
    experience time.
"""
table = pandas.DataFrame()
table['Company'] = pandas.Series(['A', 'B', 'A', 'B', 'A', 'B', 'A', 'B'])
table['year'] = pandas.Series(['2001', '2006', '2002', '2007', '2003', '2008', '2004', '2009'])
table['sales'] = pandas.Series([1, 34, 23, 42, 23, 1324, 34, 134])
print(table)
suggestion = calendar_vs_experience_time.calendar_vs_experience_time(table, 'sales',
['Company', 'year', 'sales'],
'Company', 'A', 'B',
SummaryOperators.MEAN,
date_column_name='year',
date_range=['2000-01-01', '2010-01-01'],
date_format='%Y')
print(suggestion)
expected_suggestion = "{'oversight': <Oversights.CALENDAR_VS_EXPERIENCE_IN_TIME_SERIES: 12>, 'confidence_score': 1.0, 'suggestion': 'The entries in the date range mentioned are not consistent for both the slices'}"
assert(str(suggestion) == expected_suggestion)
def test_2():
"""
    query : compare average sales of A and B in the date range 2000 to 2010.
    Here the oversight is not detected because the two companies do not
    differ in experience time.
"""
table = pandas.DataFrame()
table['Company'] = pandas.Series(['A', 'B', 'A', 'B', 'A', 'B', 'A', 'B'])
table['year'] = pandas.Series(['2001', '2001', '2002', '2002', '2006', '2006', '2007', '2007'])
table['sales'] = pandas.Series([1, 34, 23, 42, 23, 1324, 34, 134])
print(table)
suggestion = calendar_vs_experience_time.calendar_vs_experience_time(table, 'sales',
['Company', 'year', 'sales'],
'Company', 'A', 'B',
SummaryOperators.MEAN,
date_column_name='year',
date_range=['2000-01-01', '2010-01-01'],
date_format='%Y')
print(suggestion)
expected_suggestion = 'None'
assert(str(suggestion) == expected_suggestion)
print(test_1.__doc__)
test_1()
print(test_2.__doc__)
test_2()
print("Test cases completed")
| apache-2.0 |
nalabelle/druid-dash | query/views/QueryView.py | 2 | 1701 | from rest_framework.decorators import detail_route
from rest_framework.response import Response
from rest_framework import viewsets
from druidapi.query.models import QueryModel
from druidapi.query.serializers.QuerySerializer import QuerySerializer
from druidapi.backend.DruidConnection import DruidConnection
from rest_pandas.renderers import PandasJSONRenderer
class QueryViewSet(viewsets.ModelViewSet):
model = QueryModel
serializer_class = QuerySerializer
queryset = QueryModel.objects.all()
def create(self, request):
"""
Creates a search object for Druid
"""
serializer = self.get_serializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors)
serializer.save()
return Response(serializer.data)
def retrieve(self, request, *args, **kwargs):
"""
Returns information about a Druid Query
"""
result = self.get_object()
serializer = self.get_serializer(result)
return Response(serializer.data)
@detail_route(['get',], renderer_classes=[PandasJSONRenderer,])
def execute(self, request, *args, **kwargs):
"""
Submits the query and returns the result
"""
query = self.get_object()
dc = DruidConnection()
"""
        As more options become supported, it would make more sense to pass in
        the particular query object we'd like to execute and pull out what we
        need from it, instead of building the interval string here.
"""
result = dc.build(interval="{0}/{1}".format(query.start_date.isoformat(), query.end_date.isoformat()))
return Response(result)
| mit |
tomlof/scikit-learn | sklearn/ensemble/partial_dependence.py | 33 | 15265 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..utils.validation import check_is_fitted
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
    """Generate a grid of points based on the ``percentiles`` of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
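
    Examples
    --------
    A minimal illustrative sketch: a feature with fewer unique values than
    ``grid_resolution`` keeps exactly those unique values as its axis.

    >>> import numpy as np
    >>> grid, axes = _grid_from_X(np.array([[0.], [1.]]), grid_resolution=5)
    >>> grid    # doctest: +SKIP
    array([[ 0.],
           [ 1.]])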
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
check_is_fitted(gbrt, 'estimators_')
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features_) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features_ - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
        # don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
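    # Accumulate each tree's grid predictions into ``pdp``; every stage's
    # contribution is scaled by the model's learning rate inside
    # ``_partial_dependence_tree`` (one row of ``pdp`` per class/output).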
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of ints, strings, or tuples of ints or strings
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
If feature_names is specified and seq[i] is an int, seq[i]
must be < len(feature_names).
If seq[i] is a string, feature_names must be specified, and
seq[i] must be in feature_names.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
        Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
check_is_fitted(gbrt, 'estimators_')
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features_ != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features_')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features_)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('All entries of features must be less than '
'len(feature_names) = {0}, got {1}.'
.format(len(feature_names), i))
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
ioos/compliance-checker | compliance_checker/tests/test_acdd.py | 2 | 21174 | import os
import numpy as np
from netCDF4 import Dataset
from compliance_checker.acdd import ACDD1_1Check, ACDD1_3Check
from compliance_checker.tests import BaseTestCase
from compliance_checker.tests.helpers import MockTimeSeries, MockVariable
from compliance_checker.tests.resources import STATIC_FILES
def to_singleton_var(l):
"""
    For each element of ``l``, take its first value if the element implements
    the iterator protocol and is not a string; otherwise keep the element as-is.
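
    For example (illustrative only)::

        to_singleton_var([("a", "b"), "c"])   # -> ["a", "c"]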
"""
return [x[0] if hasattr(x, "__iter__") and not isinstance(x, str) else x for x in l]
def check_varset_nonintersect(group0, group1):
"""
Returns true if both groups contain the same elements, regardless of
order.
:param list group0: A list of strings to compare
:param list group1: A list of strings to compare
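
    For example (illustrative only)::

        check_varset_nonintersect(["a", "b"], ["b", "a"])   # -> True
        check_varset_nonintersect(["a"], ["a", "b"])        # -> False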
"""
# Performs symmetric difference on two lists converted to sets
return len(set(group0) ^ set(group1)) == 0
class TestACDD1_1(BaseTestCase):
# TODO superclass this so ACDD1_3 can inherit?
# Adapted using `pandas.read_html` from URL
# http://wiki.esipfed.org/index.php/Attribute_Convention_for_Data_Discovery_1-1
expected = {
"Highly Recommended": ["title", "summary", "keywords"],
"Highly Recommended Variable Attributes": [
"long_name",
"standard_name",
"units",
"coverage_content_type",
],
"Recommended": [
"id",
"naming_authority",
"keywords_vocabulary",
"history",
"comment",
"date_created",
"creator_name",
"creator_url",
"creator_email",
"institution",
"project",
"processing_level",
"geospatial_bounds",
"geospatial_lat_min",
"geospatial_lat_max",
"geospatial_lon_min",
"geospatial_lon_max",
"geospatial_vertical_min",
"geospatial_vertical_max",
"time_coverage_start",
"time_coverage_end",
"time_coverage_duration",
"time_coverage_resolution",
"standard_name_vocabulary",
"license",
],
"Suggested": [
"contributor_name",
"contributor_role",
"publisher_name",
"publisher_url",
"publisher_email",
"date_modified",
"date_issued",
"geospatial_lat_units",
"geospatial_lat_resolution",
"geospatial_lon_units",
"geospatial_lon_resolution",
"geospatial_vertical_units",
"geospatial_vertical_resolution",
"geospatial_vertical_positive",
],
}
def setUp(self):
# Use the NCEI Gold Standard Point dataset for ACDD checks
self.ds = self.load_dataset(STATIC_FILES["ncei_gold_point_1"])
self.acdd = ACDD1_1Check()
self.acdd_highly_recommended = to_singleton_var(self.acdd.high_rec_atts)
self.acdd_recommended = to_singleton_var(self.acdd.rec_atts)
self.acdd_suggested = to_singleton_var(self.acdd.sug_atts)
def test_cc_meta(self):
assert self.acdd._cc_spec == "acdd"
assert self.acdd._cc_spec_version == "1.1"
def test_highly_recommended(self):
"""
Checks that all highly recommended attributes are present
"""
assert check_varset_nonintersect(
self.expected["Highly Recommended"], self.acdd_highly_recommended
)
# Check the reference dataset, NCEI 1.1 Gold Standard Point
missing = ["\"Conventions\" does not contain 'ACDD-1.3'"]
results = self.acdd.check_high(self.ds)
for result in results:
if result.msgs and all([m in missing for m in result.msgs]):
# only the Conventions check should have failed
                self.assert_result_is_bad(result)
                continue
self.assert_result_is_good(result)
# Create an empty dataset that writes to /dev/null This acts as a
# temporary netCDF file in-memory that never gets written to disk.
empty_ds = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(empty_ds.close)
results = self.acdd.check_high(empty_ds)
for result in results:
self.assert_result_is_bad(result)
def test_recommended(self):
"""
Checks that all recommended attributes are present
"""
# 'geospatial_bounds' attribute currently has its own separate check
# from the list of required atts
assert check_varset_nonintersect(
self.expected["Recommended"], self.acdd_recommended
)
ncei_exceptions = [
"geospatial_bounds not present",
"time_coverage_duration not present",
"time_coverage_resolution not present",
]
results = self.acdd.check_recommended(self.ds)
for result in results:
if (result.msgs) and all(
[m in ncei_exceptions for m in result.msgs]
): # we're doing string comparisons, this is kind of hacky...
self.assert_result_is_bad(result)
continue
self.assert_result_is_good(result)
# Create an empty dataset that writes to /dev/null This acts as a
# temporary netCDF file in-memory that never gets written to disk.
empty_ds = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(empty_ds.close)
results = self.acdd.check_recommended(empty_ds)
for result in results:
self.assert_result_is_bad(result)
def test_suggested(self):
"""
Checks that all suggested attributes are present
"""
assert check_varset_nonintersect(
self.expected["Suggested"], self.acdd_suggested
)
# Attributes that are missing from NCEI but should be there
missing = [
"geospatial_lat_resolution not present",
"geospatial_lon_resolution not present",
"geospatial_vertical_resolution not present",
]
results = self.acdd.check_suggested(self.ds)
for result in results:
if (result.msgs) and all(
[m in missing for m in result.msgs]
): # we're doing string comparisons, this is kind of hacky...
self.assert_result_is_bad(result)
continue
self.assert_result_is_good(result)
# Create an empty dataset that writes to /dev/null This acts as a
# temporary netCDF file in-memory that never gets written to disk.
empty_ds = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(empty_ds.close)
results = self.acdd.check_recommended(empty_ds)
for result in results:
self.assert_result_is_bad(result)
    def test_acknowledgement_check(self):
"""Check both the British- and American-English spellings of 'acknowledgement'"""
# Check British Spelling
try:
empty0 = Dataset(os.devnull, "w", diskless=True)
result = self.acdd.check_acknowledgment(empty0)
self.assert_result_is_bad(result)
empty0.acknowledgement = "Attribution goes here"
result = self.acdd.check_acknowledgment(empty0)
self.assert_result_is_good(result)
finally:
empty0.close()
try:
# Check American spelling
empty1 = Dataset(os.devnull, "w", diskless=True)
result = self.acdd.check_acknowledgment(empty1)
self.assert_result_is_bad(result)
empty1.acknowledgment = "Attribution goes here"
result = self.acdd.check_acknowledgment(empty1)
self.assert_result_is_good(result)
finally:
empty1.close()
class TestACDD1_3(BaseTestCase):
# Adapted using `pandas.read_html` from URL
# http://wiki.esipfed.org/index.php/Attribute_Convention_for_Data_Discovery_1-3
expected = {
"Suggested": [
"creator_type",
"creator_institution",
"publisher_type",
"publisher_institution",
"program",
"contributor_name",
"contributor_role",
"geospatial_lat_units",
"geospatial_lat_resolution",
"geospatial_lon_units",
"geospatial_lon_resolution",
"geospatial_vertical_units",
"geospatial_vertical_resolution",
"date_modified",
"date_issued",
"date_metadata_modified",
"product_version",
"keywords_vocabulary",
"platform",
"platform_vocabulary",
"instrument",
"instrument_vocabulary",
"metadata_link",
"references",
],
"Highly Recommended": ["title", "summary", "keywords", "Conventions"],
"Recommended": [
"id",
"naming_authority",
"history",
"source",
"processing_level",
"comment",
"license",
"standard_name_vocabulary",
"date_created",
"creator_name",
"creator_email",
"creator_url",
"institution",
"project",
"publisher_name",
"publisher_email",
"publisher_url",
"geospatial_bounds",
"geospatial_bounds_crs",
"geospatial_bounds_vertical_crs",
"geospatial_lat_min",
"geospatial_lat_max",
"geospatial_lon_min",
"geospatial_lon_max",
"geospatial_vertical_min",
"geospatial_vertical_max",
"geospatial_vertical_positive",
"time_coverage_start",
"time_coverage_end",
"time_coverage_duration",
"time_coverage_resolution",
],
"Highly Recommended Variable Attributes": [
"long_name",
"standard_name",
"units",
"coverage_content_type",
],
}
def setUp(self):
# Use the NCEI Gold Standard Point dataset for ACDD checks
self.ds = self.load_dataset(STATIC_FILES["ncei_gold_point_2"])
self.acdd = ACDD1_3Check()
self.acdd_highly_recommended = to_singleton_var(self.acdd.high_rec_atts)
self.acdd_recommended = to_singleton_var(self.acdd.rec_atts)
self.acdd_suggested = to_singleton_var(self.acdd.sug_atts)
def test_cc_meta(self):
assert self.acdd._cc_spec == "acdd"
assert self.acdd._cc_spec_version == "1.3"
def test_highly_recommended(self):
"""
Checks that all highly recommended attributes are present
"""
assert check_varset_nonintersect(
self.expected["Highly Recommended"], self.acdd_highly_recommended
)
results = self.acdd.check_high(self.ds)
for result in results:
# NODC 2.0 has a different value in the conventions field
self.assert_result_is_good(result)
def test_recommended(self):
"""
Checks that all recommended attributes are present
"""
assert check_varset_nonintersect(
self.expected["Recommended"], self.acdd_recommended
)
results = self.acdd.check_recommended(self.ds)
ncei_exceptions = [
"time_coverage_duration not present",
"time_coverage_resolution not present",
]
for result in results:
if (result.msgs) and all(
[m in ncei_exceptions for m in result.msgs]
): # we're doing string comparisons, this is kind of hacky...
self.assert_result_is_bad(result)
continue
self.assert_result_is_good(result)
def test_suggested(self):
"""
Checks that all suggested attributes are present
"""
assert check_varset_nonintersect(
self.expected["Suggested"], self.acdd_suggested
)
results = self.acdd.check_suggested(self.ds)
# NCEI does not require or suggest resolution attributes
ncei_exceptions = [
"geospatial_lat_resolution not present",
"geospatial_lon_resolution not present",
"geospatial_vertical_resolution not present",
]
for result in results:
if (result.msgs) and all(
[m in ncei_exceptions for m in result.msgs]
): # we're doing string comparisons, this is kind of hacky...
self.assert_result_is_bad(result)
continue
self.assert_result_is_good(result)
def test_variables(self):
"""
Test that variables are checked for required attributes
"""
# Create an empty dataset that writes to /dev/null This acts as a
# temporary netCDF file in-memory that never gets written to disk.
empty_ds = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(empty_ds.close)
# The dataset needs at least one variable to check that it's missing
# all the required attributes.
empty_ds.createDimension("time", 1)
empty_ds.createVariable("fake", "float32", ("time",))
# long_name
results = self.acdd.check_var_long_name(self.ds)
for result in results:
self.assert_result_is_good(result)
results = self.acdd.check_var_long_name(empty_ds)
assert len(results) == 1
for result in results:
self.assert_result_is_bad(result)
# standard_name
results = self.acdd.check_var_standard_name(self.ds)
for result in results:
self.assert_result_is_good(result)
results = self.acdd.check_var_standard_name(empty_ds)
assert len(results) == 1
for result in results:
self.assert_result_is_bad(result)
# units
results = self.acdd.check_var_units(self.ds)
for result in results:
self.assert_result_is_good(result)
results = self.acdd.check_var_units(empty_ds)
assert len(results) == 1
for result in results:
self.assert_result_is_bad(result)
def test_vertical_extents(self):
"""
Test vertical extents are being checked
"""
result = self.acdd.check_vertical_extents(self.ds)
self.assert_result_is_good(result)
def test_geospatial_bounds(self):
"""
Test geospatial bounds are checked and provide a good error message
"""
        # Create an empty dataset that writes to /dev/null. This acts as a
# temporary netCDF file in-memory that never gets written to disk.
empty_ds = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(empty_ds.close)
# Misspelled WKT. Error message should include the attribute checked
# and the value that was provided for easy troubleshooting
empty_ds.geospatial_bounds = "POIT (-123.458000 38.048000)"
results = self.acdd.check_recommended(empty_ds)
for result in results:
if result.variable_name == "geospatial_bounds":
assert (
"Could not parse WKT from geospatial_bounds,"
' possible bad value: "{}"'.format(empty_ds.geospatial_bounds)
in result.msgs
)
def test_time_extents(self):
"""
Test that the time extents are being checked
"""
result = self.acdd.check_time_extents(self.ds)
self.assert_result_is_good(result)
empty_ds = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(empty_ds.close)
# The dataset needs at least one variable to check that it's missing
# all the required attributes.
empty_ds.createDimension("time", 1)
time_var = empty_ds.createVariable("time", "float32", ("time",))
time_var.units = "seconds since 1970-01-01 00:00:00 UTC"
time_var[:] = [1451692800] # 20160102T000000Z in seconds since epoch
empty_ds.time_coverage_start = "20160102T000000Z"
empty_ds.time_coverage_end = "20160102T000000Z"
result = self.acdd.check_time_extents(empty_ds)
self.assert_result_is_good(result)
# try the same thing with time offsets
time_var.units = "seconds since 1970-01-01 00:00:00-10:00"
empty_ds.time_coverage_start = "20160102T000000-1000"
empty_ds.time_coverage_end = "20160102T000000-1000"
result = self.acdd.check_time_extents(empty_ds)
self.assert_result_is_good(result)
def test_check_lat_extents(self):
"""Test the check_lat_extents() method behaves expectedly"""
# create dataset using MockDataset, give it lat/lon dimensions
ds = MockTimeSeries()
ds.variables["lat"][:] = np.linspace(
-135.0, -130.0, num=500
) # arbitrary, but matches time dim size
# test no values, expect failure
result = self.acdd.check_lat_extents(ds)
self.assert_result_is_bad(result)
# give integer geospatial_lat_max/min, test
ds.setncattr("geospatial_lat_min", -135)
ds.setncattr("geospatial_lat_max", -130)
result = self.acdd.check_lat_extents(ds)
self.assert_result_is_good(result)
# give float geospatial_lat_min/max, test
ds.setncattr("geospatial_lat_min", -135.0)
ds.setncattr("geospatial_lat_max", -130.0)
result = self.acdd.check_lat_extents(ds)
self.assert_result_is_good(result)
# give string (in number-form), test
ds.setncattr("geospatial_lat_min", "-135.")
ds.setncattr("geospatial_lat_max", "-130.")
result = self.acdd.check_lat_extents(ds)
self.assert_result_is_good(result)
# give garbage string -- expect failure
ds.setncattr("geospatial_lat_min", "bad")
ds.setncattr("geospatial_lat_max", "val")
result = self.acdd.check_lat_extents(ds)
self.assert_result_is_bad(result)
def test_check_lon_extents(self):
"""Test the check_lon_extents() method behaves expectedly"""
# create dataset using MockDataset, give it lat/lon dimensions
ds = MockTimeSeries()
ds.variables["lon"][:] = np.linspace(65.0, 67.0, num=500)
# test no values, expect failure
# result = self.acdd.check_lon_extents(ds)
# self.assert_result_is_bad(result)
# give integer geospatial_lon_max/min, test
ds.setncattr("geospatial_lon_min", 65)
ds.setncattr("geospatial_lon_max", 67)
result = self.acdd.check_lon_extents(ds)
self.assert_result_is_good(result)
# give float geospatial_lon_min/max, test
ds.setncattr("geospatial_lon_min", 65.0)
ds.setncattr("geospatial_lon_max", 67.0)
result = self.acdd.check_lon_extents(ds)
self.assert_result_is_good(result)
# give string (in number-form), test
ds.setncattr("geospatial_lon_min", "65.")
ds.setncattr("geospatial_lon_max", "67.")
result = self.acdd.check_lon_extents(ds)
self.assert_result_is_good(result)
# give garbage string -- expect failure
ds.setncattr("geospatial_lon_min", "bad")
ds.setncattr("geospatial_lon_max", "val")
result = self.acdd.check_lon_extents(ds)
self.assert_result_is_bad(result)
def test_check_geospatial_vertical_max(self):
ds = MockTimeSeries()
ds.variables["depth"][:] = np.linspace(0.0, 30.0, num=500)
# give integer geospatial_vertical_max/min, test
ds.setncattr("geospatial_vertical_min", 0)
ds.setncattr("geospatial_vertical_max", 30)
result = self.acdd.check_vertical_extents(ds)
self.assert_result_is_good(result)
# give float geospatial_vertical_min/max, test
ds.setncattr("geospatial_vertical_min", 0.0)
ds.setncattr("geospatial_vertical_max", 30.0)
result = self.acdd.check_vertical_extents(ds)
self.assert_result_is_good(result)
# give string (in number-form), test
ds.setncattr("geospatial_vertical_min", "0.")
ds.setncattr("geospatial_vertical_max", "30.")
result = self.acdd.check_vertical_extents(ds)
self.assert_result_is_good(result)
# give garbage string -- expect failure
ds.setncattr("geospatial_vertical_min", "bad")
ds.setncattr("geospatial_vertical_max", "val")
result = self.acdd.check_vertical_extents(ds)
self.assert_result_is_bad(result)
# all masked values mean that there are no valid array elements to get
# the min/max of
ds.setncattr("geospatial_vertical_min", 0.0)
ds.setncattr("geospatial_vertical_max", 30.0)
ds.variables["depth"][:] = np.ma.masked_all(ds.variables["depth"].shape)
result = self.acdd.check_vertical_extents(ds)
self.assert_result_is_bad(result)
| apache-2.0 |
olologin/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 47 | 2495 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix, the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be because our
choice for the regularization parameter C was not the best.
In real-life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e. by the number of samples
# in each class)
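# For intuition, a hypothetical 2x2 matrix [[2, 1], [0, 3]] has row sums
# [3, 3], so row-normalizing yields [[0.67, 0.33], [0.00, 1.00]]; each row
# then sums to 1 and reads as per-class fractions.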
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
CharlesSanfiorenzo/Bioinformatics | MEP/TranslationEfficiency.py | 1 | 8956 | ######Calculates translation efficiency and updates database & fasta.###########
######If you have any questions, please contact the author of this script####### #Add if upregulation or downregulation
######at [email protected] ######################################################
import sys, getopt
import pandas as pd
import os.path
########Argument parser
def main(argv):
global FPKMfile
global fastaFile
global cutaway
global regType
global bias
regType = 'down'
FPKMfile = ''
fastaFile = ''
cutaway = 33.0
required = []
bias = False
try:
opts, args = getopt.getopt(argv,"hT:f:r:b")
except getopt.GetoptError:
print 'Usage: TranslationEfficiency.py -T <FPKMTable> -f <fastafile> [optional: -r <retention value> -R <regulation type> ] > MotifsNoRef.fa'
print 'Note: Type \'-h\' for additional help'
sys.exit(2)
for opt, arg in opts:
if len(sys.argv) == 1 :
print 'Usage: TranslationEfficiency.py -T <FPKMTable> -f <fastafile> [optional: -r <retention value> -R <regulation type> ] > MotifsNoRef.fa'
print 'Note: Type \'-h\' for additional help'
sys.exit(2)
elif opt == '-h':
print '''Usage: TranslationEfficiency.py -T <FPKMTable> -f <fastafile> [optional: -r <retention value> -R <regulation type> ] > MotifsNoRef.fa
Example: TranslationalEfficiency.py -T SelectGenesFPKM.txt -f SelectGenesMotif.fa -r 33 -R down > MotifsNoRef.fa
Author: Charles Sanfiorenzo (2017)
--------------------------------------------------------------------
Running this script will calculate translation efficiency and produce
a csv with the results (MEP2.fpkm.csv) in the working directory. The
gene with the most downregulation is used as reference for alignment
score determination, and so an updated csv (MEP2_NoRefGene.fpkm.csv) and
two fastas (NeedleReference.fa & MotifsNoRef.fa) are created for later
steps in the protocol.
Optional arguments:
-r | establishes the allowed difference in percentage of RNA transcription
per gene. Default: 33
-R | Regulation type under study. Can be 'up' or 'down'. Default: down
For any additional questions, email the author of this script at
[email protected]'''
sys.exit()
elif opt in ("-T"):
FPKMfile = arg
required += [arg]
elif opt in ("-f"):
fastaFile = arg
required += [arg]
elif opt in ("-r"):
            cutaway = float(arg)
elif opt in ("-R"):
regType = arg
elif opt in ("-b"):
bias = True
else :
print 'Usage: TranslationEfficiency.py -T <FPKMTable> -f <fastafile> [optional: -r <retention value> -R <regulation type> ] > MotifsNoRef.fa'
print 'Note: Type \'-h\' for additional help'
sys.exit(2)
args = sys.argv[1:]
if not args :
print 'Usage: TranslationEfficiency.py -T <FPKMTable> -f <fastafile> [optional: -r <retention value> -R <regulation type> ] > MotifsNoRef.fa'
print 'Note: Type \'-h\' for additional help'
sys.exit()
if len(required) == 1 :
print 'Error: Missing argument.'
print 'Usage: TranslationEfficiency.py -T <FPKMTable> -f <fastafile> [optional: -r <retention value> -R <regulation type> ] > MotifsNoRef.fa'
print 'Note: Type \'-h\' for additional help'
sys.exit()
if os.path.isfile(FPKMfile) == False :
print 'Error:',FPKMfile,'not found'
print 'Usage: TranslationEfficiency.py -T <FPKMTable> -f <fastafile> [optional: -r <retention value> -R <regulation type> ] > MotifsNoRef.fa'
print 'Note: Type \'-h\' for additional help'
sys.exit()
if os.path.isfile(fastaFile) == False :
print 'Error:',fastaFile,'not found'
print 'Usage: TranslationEfficiency.py -T <FPKMTable> -f <fastafile> [optional: -r <retention value> -R <regulation type> ] > MotifsNoRef.fa'
print 'Note: Type \'-h\' for additional help'
sys.exit()
if cutaway.is_integer() == False :
        print 'Error:',cutaway,'is not an integer'
print 'Usage: TranslationEfficiency.py -T <FPKMTable> -f <fastafile> [optional: -r <retention value> -R <regulation type> ] > MotifsNoRef.fa'
print 'Note: Type \'-h\' for additional help'
sys.exit()
if regType.lower() not in ['down','up'] :
print 'Error: -R must be \'down\' or \'up\'.'
print 'Usage: TranslationEfficiency.py -T <FPKMTable> -f <fastafile> [optional: -r <retention value> -R <regulation type> ] > MotifsNoRef.fa'
print 'Note: Type \'-h\' for additional help'
sys.exit()
if __name__ == "__main__":
main(sys.argv[1:])
########
columns=['Gene Symbol', 'Ribo-Seq: Control (Optimal conditions) FPKM', 'Ribo-Seq: Amino acid starvation FPKM', 'RNA-Seq: Control (Optimal conditions) FPKM', 'RNA-Seq: Amino acid starvation FPKM']
df = pd.read_csv(FPKMfile, sep=" ", skipinitialspace=False, header=None)
df.columns = columns
index = range(len(df))
pd.DataFrame(index=index)
newLst = []
transEff = []
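#Illustrative walk-through with hypothetical FPKM values (not from any dataset):
#a gene with RNA-Seq control=100 and starvation=90 differs by 10, below the default
#33% retention cutoff, so it is not labeled as transcription regulation. If its
#Ribo-Seq FPKM drops from 50 (control) to 10 (starvation), result = 40 and the
#stored efficiency is -40/50 = -0.8, i.e. translational downregulation.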
for idx in df.index :
if float(df.ix[:, 'RNA-Seq: Control (Optimal conditions) FPKM'][idx]) - float(df.ix[:, 'RNA-Seq: Amino acid starvation FPKM'][idx]) >= (cutaway/100)*float(df.ix[:, 'RNA-Seq: Control (Optimal conditions) FPKM'][idx]) :
transEff += ['Transcription Regulation']
newLst += [0]
else :
result = (float(df.ix[:, 'Ribo-Seq: Control (Optimal conditions) FPKM'][idx]) ) - (float(df.ix[:, 'Ribo-Seq: Amino acid starvation FPKM'][idx]) )
if float(df.ix[:, 'Ribo-Seq: Control (Optimal conditions) FPKM'][idx]) != 0 :
transEff += [-result/float(df.ix[:, 'Ribo-Seq: Control (Optimal conditions) FPKM'][idx])]
else :
transEff += [-result] #This fix assumes that the FPKM value for this gene is also small (as to not affect training during SVM implementation)
#We will assume that any gene expressed in sufficiently small amounts as to result in 0 FPKM in either control or experimental data sets should be ignored for minimum estimation
if float(df.ix[:, 'Ribo-Seq: Control (Optimal conditions) FPKM'][idx]) == 0 or float(df.ix[:, 'Ribo-Seq: Amino acid starvation FPKM'][idx]) == 0 :
newLst += [0]
else :
#To know which gene has the largest difference in translation levels, we will need to divide each substraction by their control FPKM
newLst += [float(-result)/float(df.ix[:, 'Ribo-Seq: Control (Optimal conditions) FPKM'][idx])]
df['Translational Efficiency'] = transEff
#Minimum or maximum estimation
if regType.lower() == 'down' :
minID = [i for i,x in enumerate(newLst) if x == min(newLst)]
minGene = df.ix[:, 'Gene Symbol'][minID].values[0]
elif regType.lower() == 'up' :
minID = [i for i,x in enumerate(newLst) if x == max(newLst)]
minGene = df.ix[:, 'Gene Symbol'][minID].values[0]
columns=['Gene Symbol', 'Ribo-Seq: Control (Optimal conditions) FPKM', 'Ribo-Seq: Amino acid starvation FPKM', 'RNA-Seq: Control (Optimal conditions) FPKM', 'RNA-Seq: Amino acid starvation FPKM', 'Translational Efficiency']
df.to_csv('MEP2.fpkm.csv')
#Produce updated csv w/o reference genes
df = df[df['Gene Symbol'] != minGene]
df.reset_index(drop=True,inplace=True)
if bias == True : #Move me up!
#df = df[df['Ribo-Seq: Control (Optimal conditions) FPKM'] <= 1 ]
#df = df[df['RNA-Seq: Control (Optimal conditions) FPKM'] <= 1 ] #Fix Me
#df = df[df['RNA-Seq: Amino acid starvation FPKM'] <= 1 ]
df.reset_index(drop=True,inplace=True)
df.to_csv('MEP2_NoRefGene.fpkm.csv')
#Produce updated fasta
# A nice little file reader
def contentExtractor(files) :
with open(files) as f:
content = f.readlines()
content = [x.strip('\n') for x in content]
return content
content = contentExtractor(fastaFile)
############ Generator 1 (Header and sequence extractor; also removes from fasta gene that will be used as reference)
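#Assumed input layout (hypothetical records, one sequence line per header, as this
#parser expects):
#  >GENE1
#  ATGGCCAAA...
#  >GENE2
#  ATGTTTGGG...
#Records whose header matches '>'+minGene are diverted to the reference lists below.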
sequences = []
headers = []
referenceHead = []
referenceSeq = []
for element in content :
if element[0] == '>' :
if element != '>'+minGene :
headers += [element]
previousElem = ''
else :
previousElem = element
referenceHead += [element]
else :
if previousElem != '>'+minGene :
sequences += [element]
else :
referenceSeq += [element]
############
############ Generator 2 (Outputs updated fasta w/o reference genes)
for i in range(len(headers)) :
print headers[i],'\n',sequences[i]
############
############ Generator 3 (Outputs fasta w/ genes to be used as reference for Needle alignment)
f = open('NeedleReference.fa','w')
for i in range(len(referenceHead)) :
print >>f, referenceHead[i], '\n', referenceSeq[i]
f.close()
############
#Output MEP2 settings log
f = open('MEP2.settings','w')
print >>f, "[MEP2Settings]"
print >>f, "Regulation Type =", regType.lower()
print >>f, "Retention value =", cutaway
print >>f, "FPKM Table used =",FPKMfile
print >>f, "Fasta used =",fastaFile
print >>f, "Reference Gene =", minGene
f.close()
| mit |
raghavrv/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 42 | 27323 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
from sklearn.exceptions import DataConversionWarning
def test_pairwise_distances():
    # Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses scikit-learn metric, cityblock (function) is
# scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# The string "cosine" uses sklearn.metric,
# while the function cosine is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
# test that we convert to boolean arrays for boolean distances
rng = np.random.RandomState(0)
X = rng.randn(5, 4)
Y = X.copy()
Y[0, 0] = 1 - Y[0, 0]
for metric in PAIRWISE_BOOLEAN_FUNCTIONS:
for Z in [Y, None]:
res = pairwise_distances(X, Z, metric=metric)
res[np.isnan(res)] = 0
assert_true(np.sum(res != 0) == 0)
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
        # even if shape[1] agrees (although the second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1.]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# paired_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1.]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
"chi2", "additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {'gamma': 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    # Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a value error is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean scikit-learn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
def test_cosine_distances():
# Check the pairwise Cosine distances computation
rng = np.random.RandomState(1337)
x = np.abs(rng.rand(910))
XA = np.vstack([x, x])
D = cosine_distances(XA)
assert_array_almost_equal(D, [[0., 0.], [0., 0.]])
# check that all elements are in [0, 2]
assert_true(np.all(D >= 0.))
assert_true(np.all(D <= 2.))
# check that diagonal elements are equal to 0
assert_array_almost_equal(D[np.diag_indices_from(D)], [0., 0.])
XB = np.vstack([x, -x])
D2 = cosine_distances(XB)
# check that all elements are in [0, 2]
assert_true(np.all(D2 >= 0.))
assert_true(np.all(D2 <= 2.))
# check that diagonal elements are equal to 0 and non diagonal to 2
assert_array_almost_equal(D2, [[0., 2.], [2., 0.]])
# check large random matrix
X = np.abs(rng.rand(1000, 5000))
D = cosine_distances(X)
# check that diagonal elements are equal to 0
assert_array_almost_equal(D[np.diag_indices_from(D)], [0.] * D.shape[0])
assert_true(np.all(D >= 0.))
assert_true(np.all(D <= 2.))
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = laplacian_kernel(X, X)
# the diagonal elements of a laplacian kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(5))
# off-diagonal elements are < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
# converted to 2D anyways
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
jorge2703/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
    non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
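        # For example, with input_type="string" a sample like ["cat", "dog", "cat"]
        # is treated as [("cat", 1), ("dog", 1), ("cat", 1)]; repeated hashes are
        # summed later via X.sum_duplicates().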
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
| bsd-3-clause |
DinoV/Azure-MachineLearning-ClientLibrary-Python | azureml/services.py | 2 | 35010 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License:
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#--------------------------------------------------------------------------
"""
Supports publishing and consuming published services that execute within the AzureML
web service execution framework.
Existing services can be consumed using the service decorator:
from azureml import services
@services.service(url, api_key)
@services.types(a = float, b = float)
@services.returns(float)
def some_service(a, b):
pass
Where the url and api_key are specified for the published web service.
Python functions can be published using the @publish decorator:
@services.publish(workspace, workspace_key)
@services.types(a = float, b = float)
@services.returns(float)
def float_typed(a, b):
return a / b
The function will be published under a newly created endpoint.
Publish can also be called programmatically instead:
published = services.publish(myfunc2, workspace, workspace_key)
The types and returns decorators can be used to provide type information about the
inputs and outputs. These types will be visible on the help page and enable clients
written in other languages to call published Python functions.
If types aren't specified then core Python types will be serialized in a custom manner.
This allows working with many common types such as lists, dictionaries, numpy types, etc...
But interop with other languages will be much more difficult.
Files can also be attached to published functions using the @attach decorator:
@services.publish(workspace, workspace_key)
@services.attach('foo.txt')
def attached():
return ''.join(file('foo.txt').readlines())
"""
from functools import update_wrapper
import codecs
import inspect
import re
import requests
import uuid
import sys
import json
import base64
import zipfile
import dis
from collections import deque, OrderedDict
from types import CodeType, FunctionType, ModuleType
import types as typesmod
try:
import cPickle as pickle
except:
import pickle
try:
from io import BytesIO
except:
from cStringIO import StringIO as BytesIO
try:
import azureml
except:
# We are published, we won't call publish_worker again.
pass
try:
import numpy
except:
numpy = None
try:
import pandas
except:
pandas = None
_LOAD_GLOBAL = dis.opmap['LOAD_GLOBAL']
#################################################
# Serialization/Deserialization of inputs. This code is distinct from the
# serialization of the user defined function. The user defined function can contain
# arbitrary objects and is fully trusted (so we can use pickle). The inputs to the function
# are coming from arbitrary user input and so need to support a more limited form
# of serialization.
#
# Serialization of the arguments is done using JSON. Each argument is serialized with
# a type and a value. The type is a known type name (int, bool, float, etc...) and the
# value is the serialized value in string format. Usually this is the simplest possible
# representation. Strings are serialized as is, ints/floats we just call str() on, etc...
# For byte arrays we base64 encode them. For data structures we store a list of the elements
# which are encoded in the same way. For example a list would have a list of dictionaries
# in JSON which each have a type and value member.
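# Illustrative shape of the encoding (key order / whitespace may differ):
#   _encode([1, u'a']) ->
#     '{"type": "list", "value": [{"type": "int", "value": "1"},
#                                 {"type": "unicode", "value": "a"}]}'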
_serializers = {}
_deserializers = {}
def serializer(type):
def l(func):
_serializers[type] = func
return func
return l
def deserializer(type):
def l(func):
_deserializers[type] = func
return func
return l
# Type: bool
@serializer(bool)
def _serialize_bool(inp, memo):
return {'type': 'bool', 'value': 'true' if inp else 'false' }
@deserializer('bool')
def _deserialize_bool(value):
if value['value'] == 'true':
return True
else:
return False
# Type: int
@serializer(int)
def _serialize_int(inp, memo):
return {'type': 'int', 'value': str(inp) }
@deserializer('int')
def _deserialize_int(value):
return int(value['value'])
if sys.version_info < (3, ):
# long
@serializer(long)
def _serialize_long(inp, memo):
return {'type': 'long', 'value': str(inp) }
@deserializer('long')
def _deserialize_long(value):
return long(value['value'])
# Type: float
@serializer(float)
def _serialize_float(inp, memo):
return {'type': 'float', 'value': str(inp) }
@deserializer('float')
def _deserialize_float(value):
return float(value['value'])
# Type: complex
@serializer(complex)
def _serialize_complex(inp, memo):
return {'type': 'complex', 'value': str(inp) }
@deserializer('complex')
def _deserialize_complex(value):
return complex(value['value'])
# Type: unicode
@serializer(str if sys.version_info >= (3,) else unicode)
def _serialize_unicode(inp, memo):
return {'type': 'unicode', 'value': str(inp) }
@deserializer('unicode')
def _deserialize_unicode(value):
return value['value']
# Type: byte arrays
@serializer(bytes if sys.version_info >= (3,) else str)
def _serialize_bytes(inp, memo):
data = base64.encodestring(inp)
if sys.version_info >= (3, ):
data = data.decode('utf8')
return {'type': 'bytes', 'value': data.replace(chr(10), '') }
@deserializer('bytes')
def _deserialize_bytes(value):
data = value['value']
if sys.version_info >= (3, ):
data = data.encode('utf8')
return base64.decodestring(data)
# Type: dictionaries
@serializer(dict)
def serialize_dict(inp, memo):
return {
'type': 'dict',
'value' : [(_encode(k, memo), _encode(inp[k], memo)) for k in inp]
}
@deserializer('dict')
def _deserialize_dict(value):
return { _decode_inner(k):_decode_inner(v) for k, v in value['value'] }
# Type: None/null
@serializer(type(None))
def serialize_none(inp, memo):
return {'type':'null', 'value':'null'}
@deserializer('null')
def _deserialize_null(value):
return None
# Type: list and tuple
@serializer(list)
@serializer(tuple)
def _serialize_list_or_tuple(inp, memo):
res = []
for value in inp:
res.append(_encode(value, memo))
return {'type': type(inp).__name__, 'value': res }
@deserializer('list')
def _deserialize_list(value):
return [_decode_inner(x) for x in value['value']]
@deserializer('tuple')
def _deserialize_tuple(value):
return tuple(_decode_inner(x) for x in value['value'])
if numpy is not None:
# ndarray is serialized as (shape, datatype, data)
@serializer(numpy.ndarray)
def serialize_ndarray(inp, memo):
return {
'type':'numpy.ndarray',
'value': (
_encode(inp.shape, memo),
_encode(inp.dtype.name, memo),
_encode(inp.tostring(), memo)
)
}
@deserializer('numpy.ndarray')
def deserialize_ndarray(value):
shape, dtype, data = value['value']
return numpy.ndarray(
_decode_inner(shape), _decode_inner(dtype), _decode_inner(data)
)
# TODO: Need better story here...
@serializer(numpy.int32)
def serialize_numpy_int32(inp, memo):
return _serialize_int(inp, memo)
@serializer(numpy.int64)
def serialize_numpy_int64(inp, memo):
if sys.version_info >= (3, ):
return _serialize_int(inp, memo)
return _serialize_long(inp, memo)
@serializer(numpy.float64)
def serialize_numpy_float64(inp, memo):
return _serialize_float(inp, memo)
# Core deserialization functions. There's a top-level one used when
# actually reading/writing values, and an inner one when we're doing the
# recursive serialization/deserialization.
def _decode_inner(value):
val_type = value['type']
deserializer = _deserializers.get(value['type'])
if deserializer is None:
raise ValueError("unsupported type: " + value['type'])
return deserializer(value)
def _encode(inp, memo = None):
outer = False
if memo is None:
outer = True
memo = {}
if id(inp) in memo and type(inp) in [list, tuple, dict]:
raise ValueError('circular reference detected')
memo[id(inp)] = inp
serializer = _serializers.get(type(inp))
if serializer is None:
raise TypeError("Unsupported type for invocation: " + type(inp).__module__ + '.' + type(inp).__name__)
res = serializer(inp, memo)
if outer:
return json.dumps(res)
return res
def _decode(inp):
value = json.loads(inp)
if isinstance(value, dict):
return _decode_inner(value)
raise TypeError('expected a dictionary, got ' + type(inp).__name__)
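# Round trip for supported inputs (illustrative):
#   _decode(_encode({'a': [1, 2.5]})) == {'a': [1, 2.5]}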
PUBLISH_URL_FORMAT = '{}/workspaces/{}/webservices/{}'
if sys.version_info >= (3, 0):
_code_args = ['co_argcount', 'co_kwonlyargcount', 'co_nlocals', 'co_stacksize', 'co_flags',
'co_code', 'co_consts', 'co_names', 'co_varnames', 'co_filename', 'co_name',
'co_firstlineno', 'co_lnotab', 'co_freevars', 'co_cellvars']
_func_args = ['__name__', '__defaults__', '__closure__']
else:
_code_args = ['co_argcount', 'co_nlocals', 'co_stacksize', 'co_flags', 'co_code', 'co_consts',
'co_names', 'co_varnames', 'co_filename', 'co_name', 'co_firstlineno', 'co_lnotab',
'co_freevars', 'co_cellvars']
_func_args = ['func_name', 'func_defaults', 'func_closure']
class _Serializer(object):
    '''Serializes the specified functions, and the globals they use as well.
    Normal globals are serialized as-is; they must be picklable to do so.
    Other functions which are referenced are serialized as additional functions and
    will be repopulated in globals. This allows things like mutually recursive functions
    to exist.
'''
def __init__(self):
self.functions = set()
self.queue = deque()
if sys.version_info < (3, ):
CLASS_TYPES = (typesmod.ClassType, type)
else:
CLASS_TYPES = type
def serialize(self, obj):
self.queue.append(('func', obj.__name__, obj))
self.functions.add((obj.__name__, obj))
self.mod = obj.__module__
return self.serialize_obj(obj)
def serialize_obj(self, obj):
res = []
while self.queue:
objType, name, cur = self.queue.popleft()
if objType == 'func':
res.append((objType, name, self.get_code_args(cur)))
elif objType == 'mod':
res.append((objType, name, cur.__name__))
elif objType == 'type':
raise NotImplementedError('new style class not supported')
elif objType == 'oldclass':
res.append((objType, name, [cur.__name__, cur.__module__, cur.__bases__, {n:self.serialize_obj(v) for n, v in cur.__dict__.items()}]))
else:
raise Exception('Unknown serialization type')
return pickle.dumps(res)
@staticmethod
def find_globals(code):
"""walks the byte code to find the variables which are actually globals"""
cur_byte = 0
byte_code = code.co_code
names = set()
while cur_byte < len(byte_code):
op = ord(byte_code[cur_byte])
if op >= dis.HAVE_ARGUMENT:
if op == _LOAD_GLOBAL:
oparg = ord(byte_code[cur_byte + 1]) + (ord(byte_code[cur_byte + 2]) << 8)
name = code.co_names[oparg]
names.add(name)
cur_byte += 2
cur_byte += 1
return names
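    # Illustrative example (not in the original source), under the CPython 2
    # bytecode this helper targets: for a function such as
    #
    #   def area(r):
    #       return math.pi * r * r
    #
    # find_globals(area.__code__) yields {'math'}, because only LOAD_GLOBAL
    # targets are collected; locals such as ``r`` are ignored.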
def get_code_args(self, func):
code = func.__code__
codeArgs = [getattr(code, name) for name in _code_args]
funcArgs = [getattr(func, name) for name in _func_args]
globals = {}
for name in self.find_globals(code):
if name in func.__globals__:
value = func.__globals__[name]
if isinstance(value, FunctionType):
if (name, value) not in self.functions:
self.queue.append(('func', name, value))
self.functions.add((name, value))
elif isinstance(value, ModuleType):
self.queue.append(('mod', name, value))
elif isinstance(value, _Serializer.CLASS_TYPES) and value.__module__ == self.mod:
# class that needs to be serialized...
if isinstance(value, type):
# new-style class
self.queue.append(('type', name, value))
else:
# old-style class
self.queue.append(('oldclass', name, value))
else:
globals[name] = value
return pickle.dumps((codeArgs, funcArgs, globals))
def _serialize_func(func):
return _Serializer().serialize(func)
def _deserialize_func(funcs, globalDict):
items = pickle.loads(funcs)
res = None
for objType, name, data in items:
if objType == 'func':
codeArgs, funcArgs, updatedGlobals = pickle.loads(data)
code = CodeType(*codeArgs)
globalDict.update(**updatedGlobals)
value = FunctionType(code, globalDict, *funcArgs)
elif objType == 'mod':
value = __import__(data)
elif objType == 'oldclass':
class_name, module, bases, class_dict = data
value = typesmod.ClassType(class_name, bases, {k:_deserialize_func(v, globalDict) for k, v in class_dict.items()})
value.__module__ = module
elif objType == 'type':
raise Exception('deserialize type')
else:
raise Exception('Unknown serialization type')
globalDict[name] = value
if res is None:
res = value
return res
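# Illustrative sketch (not part of the original source): the pair of helpers is
# meant to round-trip a plain function, e.g.
#
#   def double(x):
#       return x * 2
#
#   blob = _serialize_func(double)
#   restored = _deserialize_func(blob, {})
#   assert restored(21) == 42
#
# Referenced module-level functions, modules and old-style classes are pulled
# in through the _Serializer queue, as described in its docstring.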
def _get_args(func):
raw_schema = _get_dataframe_schema(func)
if raw_schema is not None:
return list(raw_schema.keys())
args = inspect.getargs(func.__code__)
all_args = args.args
if args.varargs is not None:
all_args.append(args.varargs)
if args.keywords is not None:
all_args.append(args.keywords)
return all_args
def _encode_arg(arg, type):
if type == OBJECT_NAME:
return _encode(arg)
elif type['type'].lower() == 'string':
return arg
return json.dumps(arg)
def _decode_one_response(response, real_type):
if real_type == OBJECT_NAME:
return _decode(response[0])
elif real_type['type'].lower() == 'string':
return response[0]
# TODO: These shouldn't be necessary, AzureML is returning things to us oddly...
if response[0] == 'True':
return True
elif response[0] == 'False':
return False
return json.loads(response[0])
def _get_dict_type(column, index, type, types):
if type is not None and column in type:
return _annotation_to_type(type[column])
return {'type': types[index]}
def _decode_response(columns, types, response, type):
if isinstance(type, tuple):
# multi-value decode...
return tuple(_decode_one_response((r, ), _annotation_to_type(t)) for r, t in zip(response, type))
elif isinstance(type, dict):
return {c:_decode_one_response((r, ), _get_dict_type(c, i, type, types)) for (i, c), r in zip(enumerate(columns), response)}
elif columns is not None and len(columns) > 1:
return {c:_decode_one_response((r, ), {'type': types[i]}) for (i, c), r in zip(enumerate(columns), response)}
return _decode_one_response(response, _annotation_to_type(type))
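# Illustrative example (not in the original source): for a service declared
# with a multi-value return annotation such as (int, float), a response row is
# decoded element-wise, e.g.
#
#   _decode_response(None, None, ['3', '2.5'], (int, float))  ->  (3, 2.5)
#
# Column names and types are only consulted for dict-style and multi-column
# responses.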
class published(object):
"""The result of publishing a service or marking a method as being published.
Supports being called to invoke the remote service, iteration for unpacking the url,
api key, and help url, or the url, api_key, and help_url can be accessed directly
as attributes.
"""
def __init__(self, url, api_key, help_url, func, service_id):
self.url = url
self.api_key = api_key
self.help_url = help_url
self.func = func
self.service_id = service_id
def __repr__(self):
return '<service {} at {}>'.format(self.func.__name__, self.url)
def _invoke(self, call_args):
body = {
"Inputs": {
getattr(self.func, '__input_name__', 'input1'): {
"ColumnNames": _get_args(self.func),
"Values": call_args,
}
},
"GlobalParameters": {}
}
resp = requests.post(
self.url,
json=body,
headers={
'authorization': 'bearer ' + self.api_key,
}
)
r = resp.json()
if resp.status_code >= 300:
try:
code = r['error']['code']
except LookupError:
code = None
if code in ('ModuleExecutionError', 'Unauthorized'):
raise RuntimeError(r['error']['details'][0]['message'])
raise ValueError(str(r))
return r
def _map_args(self, *args, **kwargs):
args = inspect.getcallargs(self.func, *args, **kwargs)
return [ _encode_arg(args[name], _get_arg_type(name, self.func)) for name in _get_args(self.func) ]
def __call__(self, *args, **kwargs):
# Call remote function
r = self._invoke([ self._map_args(*args, **kwargs) ])
output_name = getattr(self.func, '__output_name__', 'output1')
return _decode_response(
r["Results"][output_name]["value"].get("ColumnNames"),
r["Results"][output_name]["value"].get("ColumnTypes"),
r["Results"][output_name]["value"]["Values"][0],
_get_annotation('return', self.func)
)
def map(self, *args):
"""maps the function onto multiple inputs. The input should be multiple sequences. The
sequences will be zipped together forming the positional arguments for the call. This is
equivalent to map(func, ...) but is executed with a single network call."""
call_args = [self._map_args(*cur_args) for cur_args in zip(*args)]
r = self._invoke(call_args)
ret_type = _get_annotation('return', self.func)
output_name = getattr(self.func, '__output_name__', 'output1')
return [_decode_response(
r['Results'][output_name]['value'].get("ColumnNames"),
r['Results'][output_name]['value'].get("ColumnTypes"),
x,
ret_type)
            for x in r['Results'][output_name]['value']['Values']]
def delete(self):
"""unpublishes the service"""
raise NotImplementedError('delete not implemented yet')
def __iter__(self):
yield self.url
yield self.api_key
yield self.help_url
def _get_dataframe_schema(function):
return getattr(function, '__dataframe_schema__', None)
def _get_main_source(function):
main_source = u'def azureml_main(df1 = None, df2 = None):\n'
main_source += u' results = []\n'
if _get_dataframe_schema(function):
# function just takes a dataframe...
main_source += u' results.append(__user_function(df1))' + chr(10)
else:
# we're marshalling the arguments in.
main_source += u' for i in range(df1.shape[0]):' + chr(10)
for arg in _get_args(function):
arg_type = _get_arg_type(arg, function)
if pandas is not None and arg_type is pandas.DataFrame:
raise Exception('Only a single DataFrame argument is supported')
if _get_arg_type(arg, function) == OBJECT_NAME:
main_source += ' ' + arg + u' = ' + u'_decode(df1["' + arg + u'"][i])' + chr(10)
else:
main_source += ' ' + arg + u' = ' + u'df1["' + arg + u'"][i]' + chr(10)
main_source += u' results.append(__user_function('
args = inspect.getargs(function.__code__)
all_args = args.args
if args.varargs is not None:
all_args.append(u'*' + args.varargs)
if args.keywords is not None:
all_args.append(u'**' + args.keywords)
# pass position arguments...
main_source += u', '.join(all_args)
main_source += u'))' + chr(10)
ret_annotation = _get_annotation('return', function)
if _get_dataframe_schema(function):
# function just returns a data frame directly
main_source += u' if len(results) == 1:' + chr(10)
main_source += u' return results[0]' + chr(10)
main_source += u' return pandas.DataFrame(results)' + chr(10)
elif isinstance(ret_annotation, tuple):
# multi-value return support...
format = []
arg_names = []
for index, ret_type in enumerate(ret_annotation):
arg_names.append(u'r' + str(index))
t = _annotation_to_type(ret_type)
if t == OBJECT_NAME:
format.append(u'_encode(r' + str(index) + u')')
else:
format.append(u'r' + str(index))
main_source += u' return pandas.DataFrame([(' + u', '.join(format) + u') for ' + ', '.join(arg_names) + u' in results])' + chr(10)
elif _get_arg_type('return', function) == OBJECT_NAME:
main_source += u' return pandas.DataFrame([_encode(r) for r in results])' + chr(10)
else:
main_source += u' return pandas.DataFrame(results)' + chr(10)
return main_source
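# Illustrative sketch (not part of the original source): for a function such as
#
#   @types(a=int, b=int)
#   @returns(int)
#   def add(a, b):
#       return a + b
#
# the generated entry point is roughly
#
#   def azureml_main(df1 = None, df2 = None):
#       results = []
#       for i in range(df1.shape[0]):
#           a = df1["a"][i]
#           b = df1["b"][i]
#           results.append(__user_function(a, b))
#       return pandas.DataFrame(results)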
def _get_source(function):
source_file = inspect.getsourcefile(function)
encoding = ''
try:
        with open(source_file, 'rb') as source_fp:
            line1 = source_fp.readline()
            line2 = source_fp.readline()
if line1[:3] == '\xef\xbb\xbf':
encoding = 'utf-8-sig'
else:
match = re.search(b"coding[:=]\s*([-\w.]+)", line1) or re.search(b"coding[:=]\s*([-\w.]+)", line2)
if match:
encoding = match.groups()[0]
        with codecs.open(source_file, 'r', encoding or None) as source_fp:
            source_text = source_fp.read()
except:
source_text = None
# include our source code...
ourfile = __file__
if ourfile.endswith('.pyc'):
ourfile = ourfile[:-1]
    source = u''
    if encoding:
        source = u'# coding=' + encoding.decode('ascii') + chr(10)
    with codecs.open(ourfile, 'r', 'ascii') as services_file:
        source += services_file.read()
main_source = _get_main_source(function)
source += chr(10) + main_source
if source_text is None:
# we're in a REPL environment, we need to serialize the code...
#TODO: Remove base64 encoding when json double escape issue is fixed
source += inspect.getsource(_deserialize_func)
source += chr(10)
source += u'__user_function = _deserialize_func(base64.decodestring(' + repr(base64.encodestring(_serialize_func(function)).replace(chr(10), '')) + '), globals())'
else:
# we can upload the source code itself...
source += u'''
# overwrite publish/service with ones which won't re-publish...
import sys
sys.modules['azureml'] = azureml = type(sys)('azureml')
sys.modules['azureml.services'] = services = type(sys)('services')
azureml.services = services
def publish(func, *args, **kwargs):
if callable(func):
return func
def wrapper(func):
return func
return wrapper
services.publish = publish
def service(*args):
def wrapper(func):
return func
return wrapper
def attach(*args, **kwargs):
def wrapper(func):
return func
return wrapper
services.service = service
services.types = types
services.returns = returns
services.attach = attach
services.dataframe_service = attach
services.service_id = attach
'''
source += source_text
source += chr(10)
source += u'__user_function = ' + function.__name__
return source
_known_types = {
int: {'type':'integer', 'format':'int64'},
bool: {'type' : 'Boolean'},
float: {'type': 'number', 'format':'double'},
str if sys.version_info > (3, ) else unicode: {'type':'string'},
#complex:'Complex64',
}
OBJECT_NAME = {"type":"string", "format":"string"} # "description":"Python custom serialization"
def _get_annotation(name, func):
try:
annotations = func.__annotations__
except AttributeError:
return None
return annotations.get(name)
def _annotation_to_type(annotation):
if annotation is None:
return OBJECT_NAME
if isinstance(annotation, str):
# allow the user to specify the raw string value that will be passed...
return annotation
return _known_types.get(annotation) or OBJECT_NAME
def _get_arg_type(name, func):
if name != "return":
raw_schema = _get_dataframe_schema(func)
if raw_schema is not None:
return _annotation_to_type(raw_schema[name])
annotation = _get_annotation(name, func)
return _annotation_to_type(annotation)
def _add_file(adding, zip_file):
if isinstance(adding, tuple):
name, contents = adding
else:
name = adding
contents = None
if isinstance(name, tuple):
name, dest_name = name
else:
name = dest_name = name
if contents is None:
contents = file(name, 'rb').read()
zip_file.writestr(dest_name, contents)
_DEBUG = False
def _publish_worker(func, files, workspace_id = None, workspace_token = None, management_endpoint = None):
workspace_id, workspace_token, _, management_endpoint = azureml._get_workspace_info(workspace_id, workspace_token, None, management_endpoint)
script_code = _get_source(func) + chr(10)
ret_type = _get_annotation('return', func)
if isinstance(ret_type, tuple):
# multi-value return
results = OrderedDict()
for index, obj_type in enumerate(ret_type):
results['result' + str(index)] = _annotation_to_type(obj_type)
elif isinstance(ret_type, dict):
# multi-value return
results = OrderedDict()
for name, obj_type in ret_type.items():
results[name] = _annotation_to_type(obj_type)
else:
results = {"result": _get_arg_type('return', func)}
code_bundle = {
"InputSchema": {name: _get_arg_type(name, func) for name in _get_args(func)},
"OutputSchema": results,
"Language" : "python-2.7-64",
"SourceCode": script_code,
}
attachments = getattr(func, '__attachments__', None)
if attachments or files:
data = BytesIO()
zip_file = zipfile.PyZipFile(data, 'w')
if attachments:
for adding in attachments:
_add_file(adding, zip_file)
if files:
for adding in files:
_add_file(adding, zip_file)
zip_file.close()
code_bundle['ZipContents'] = base64.b64encode(data.getvalue())
name = getattr(func, '__service_name__', func.__name__)
body = {
"Name": name,
"Type":"Code",
"CodeBundle" : code_bundle
}
id = str(getattr(func, '__service_id__', uuid.uuid4())).replace('-', '')
url = PUBLISH_URL_FORMAT.format(management_endpoint, workspace_id, id)
headers = {'authorization': 'bearer ' + workspace_token}
resp = requests.put(
url,
json=body,
headers=headers
)
if _DEBUG:
with open(func.__name__ + '.req', 'w') as f:
f.write(url + chr(10))
f.write(json.dumps(body))
f.close()
with open(func.__name__ + '.res', 'w') as f:
f.write(str(resp.status_code) + chr(10))
f.write(resp.text + chr(10))
f.close()
if resp.status_code < 200 or resp.status_code > 299:
try:
msg = resp.json()['error']['message']
except:
msg = str(resp.status_code)
raise ValueError('Failed to publish function: ' + msg + chr(10) +
'Set azureml.services._DEBUG = True to enable writing {}.req/{}.res files'.format(func.__name__, func.__name__))
j = resp.json()
epUrl = url + '/endpoints/' + j['DefaultEndpointName']
epResp = requests.get(epUrl, headers=headers)
endpoints = epResp.json()
url = endpoints['ApiLocation'] + '/execute?api-version=2.0'
return published(url, endpoints['PrimaryKey'], endpoints['HelpLocation'] + '/score', func, id)
def publish(func_or_workspace_id, workspace_id_or_token = None, workspace_token_or_none = None, files=(), endpoint=None):
'''publishes a callable function or decorates a function to be published.
Returns a callable, iterable object. Calling the object will invoke the published service.
Iterating the object will give the API URL, API key, and API help url.
To define a function which will be published to Azure you can simply decorate it with
the @publish decorator. This will publish the service, and then future calls to the
function will run against the operationalized version of the service in the cloud.
>>> @publish(workspace_id, workspace_token)
>>> def func(a, b):
>>> return a + b
After publishing you can then invoke the function using:
func.service(1, 2)
Or continue to invoke the function locally:
func(1, 2)
You can also just call publish directly to publish a function:
>>> def func(a, b): return a + b
>>>
>>> res = publish(func, workspace_id, workspace_token)
>>>
>>> url, api_key, help_url = res
>>> res(2, 3)
5
>>> url, api_key, help_url = res.url, res.api_key, res.help_url
The returned result will be the published service.
You can specify a list of files which should be published along with the function.
The resulting files will be stored in a subdirectory called 'Script Bundle'. The
list of files can be one of:
(('file1.txt', None), ) # file is read from disk
(('file1.txt', b'contents'), ) # file contents are provided
('file1.txt', 'file2.txt') # files are read from disk, written with same filename
((('file1.txt', 'destname.txt'), None), ) # file is read from disk, written with different destination name
The various formats for each filename can be freely mixed and matched.
'''
if not callable(func_or_workspace_id):
def do_publish(func):
func.service = _publish_worker(func, files, func_or_workspace_id, workspace_id_or_token, endpoint)
return func
return do_publish
return _publish_worker(func_or_workspace_id, files, workspace_id_or_token, workspace_token_or_none, endpoint)
def service(url, api_key, help_url = None):
'''Marks a function as having been published and causes all invocations to go to the remote
operationalized service.
>>> @service(url, api_key)
>>> def f(a, b):
>>> pass
'''
def do_publish(func):
return published(url, api_key, help_url, func, None)
return do_publish
def types(**args):
"""Specifies the types used for the arguments of a published service.
@types(a=int, b = str)
def f(a, b):
pass
"""
def l(func):
if hasattr(func, '__annotations__'):
func.__annotations__.update(args)
else:
func.__annotations__ = args
return func
return l
def returns(type):
"""Specifies the return type for a published service.
@returns(int)
def f(...):
pass
"""
def l(func):
if hasattr(func, '__annotations__'):
func.__annotations__['return'] = type
else:
func.__annotations__ = {'return': type}
return func
return l
def attach(name, contents = None):
"""attaches a file to the payload to be uploaded.
If contents is omitted the file is read from disk.
If name is a tuple it specifies the on-disk filename and the destination filename.
"""
def do_attach(func):
if hasattr(func, '__attachments__'):
func.__attachments__.append((name, contents))
else:
func.__attachments__ = [(name, contents)]
return func
return do_attach
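# Illustrative usage (not in the original source):
#
#   @publish(workspace_id, workspace_token)
#   @attach('lookup.csv')                      # contents read from disk
#   @attach('model.bin', b'\x00\x01')          # contents supplied inline
#   @attach(('local.txt', 'remote.txt'))       # read local.txt, upload as remote.txt
#   def predict(a, b):
#       ...
#
# The attached files end up in the ZipContents payload built by _publish_worker.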
def service_id(id):
"""Specifies the service ID to enable re-publishing to the same end point.
Can be applied to the function which is being published:
@publish(...)
@service_id('e5dd3903-796f-4544-b7aa-f4e08b2cc639')
def myfunc():
return 42
When the function is published it will replace any existing instances of the
function.
"""
def l(func):
func.__service_id__ = id
return func
return l
def name(name):
"""Provides a friendly name for the published web service which can include spaces and other characters illegal for Python functions.
"""
def l(func):
func.__service_name__ = name
return func
return l
def dataframe_service(**args):
"""Indicates that the function operations on a data frame. The function
will receive a single input in the form of a data frame, and should return
a data frame object. The schema of the data frame is specified with this
decorator.
@publish(...)
@dataframe_service(a = int, b = int)
def myfunc(df):
return pandas.DataFrame([df['a'][i] + df['b'][i] for i in range(df.shape[0])])
"""
def l(func):
func.__dataframe_schema__ = args
return func
return l
def input_name(name):
"""specifies the name of the input the web service expects to receive. Defaults to 'input1'"""
def l(func):
func.__input_name__ = name
return func
return l
def output_name(name):
"""specifies the name of the input the web service expects to receive. Defaults to 'input1'"""
def l(func):
func.__output_name__ = name
return func
return l | mit |
ContinuumIO/dask | dask/sizeof.py | 1 | 4406 | import random
import sys
from distutils.version import LooseVersion
from .utils import Dispatch
try: # PyPy does not support sys.getsizeof
sys.getsizeof(1)
getsizeof = sys.getsizeof
except (AttributeError, TypeError): # Monkey patch
def getsizeof(x):
return 100
sizeof = Dispatch(name="sizeof")
@sizeof.register(object)
def sizeof_default(o):
return getsizeof(o)
@sizeof.register(list)
@sizeof.register(tuple)
@sizeof.register(set)
@sizeof.register(frozenset)
def sizeof_python_collection(seq):
num_items = len(seq)
samples = 10
if num_items > samples:
s = getsizeof(seq) + num_items / samples * sum(
map(sizeof, random.sample(seq, samples))
)
return int(s)
else:
return getsizeof(seq) + sum(map(sizeof, seq))
@sizeof.register(dict)
def sizeof_python_dict(d):
return (
getsizeof(d)
+ sizeof(list(d.keys()))
+ sizeof(list(d.values()))
- 2 * sizeof(list())
)
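# Illustrative usage (not in the original source): callers dispatch through the
# single ``sizeof`` entry point, e.g.
#
#   >>> from dask.sizeof import sizeof
#   >>> sizeof({'x': list(range(10000))})   # doctest: +SKIP
#
# For collections longer than 10 items the handlers above sample elements and
# extrapolate, so the result is an estimate in bytes rather than an exact count.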
@sizeof.register_lazy("cupy")
def register_cupy():
import cupy
@sizeof.register(cupy.ndarray)
def sizeof_cupy_ndarray(x):
return int(x.nbytes)
@sizeof.register_lazy("numba")
def register_numba():
import numba.cuda
@sizeof.register(numba.cuda.cudadrv.devicearray.DeviceNDArray)
def sizeof_numba_devicendarray(x):
return int(x.nbytes)
@sizeof.register_lazy("rmm")
def register_rmm():
import rmm
# Only included in 0.11.0+
if hasattr(rmm, "DeviceBuffer"):
@sizeof.register(rmm.DeviceBuffer)
def sizeof_rmm_devicebuffer(x):
return int(x.nbytes)
@sizeof.register_lazy("numpy")
def register_numpy():
import numpy as np
@sizeof.register(np.ndarray)
def sizeof_numpy_ndarray(x):
return int(x.nbytes)
@sizeof.register_lazy("pandas")
def register_pandas():
import pandas as pd
import numpy as np
def object_size(x):
if not len(x):
return 0
sample = np.random.choice(x, size=20, replace=True)
sample = list(map(sizeof, sample))
return sum(sample) / 20 * len(x)
@sizeof.register(pd.DataFrame)
def sizeof_pandas_dataframe(df):
p = sizeof(df.index)
for name, col in df.iteritems():
p += col.memory_usage(index=False)
if col.dtype == object:
p += object_size(col._values)
return int(p) + 1000
@sizeof.register(pd.Series)
def sizeof_pandas_series(s):
p = int(s.memory_usage(index=True))
if s.dtype == object:
p += object_size(s._values)
if s.index.dtype == object:
p += object_size(s.index)
return int(p) + 1000
@sizeof.register(pd.Index)
def sizeof_pandas_index(i):
p = int(i.memory_usage())
if i.dtype == object:
p += object_size(i)
return int(p) + 1000
@sizeof.register(pd.MultiIndex)
def sizeof_pandas_multiindex(i):
p = int(sum(object_size(l) for l in i.levels))
for c in i.codes if hasattr(i, "codes") else i.labels:
p += c.nbytes
return int(p) + 1000
@sizeof.register_lazy("scipy")
def register_spmatrix():
from scipy import sparse
@sizeof.register(sparse.dok_matrix)
def sizeof_spmatrix_dok(s):
return s.__sizeof__()
@sizeof.register(sparse.spmatrix)
def sizeof_spmatrix(s):
return sum(sizeof(v) for v in s.__dict__.values())
@sizeof.register_lazy("pyarrow")
def register_pyarrow():
import pyarrow as pa
def _get_col_size(data):
p = 0
if not isinstance(data, pa.ChunkedArray):
data = data.data # pyarrow <0.15.0
for chunk in data.iterchunks():
for buffer in chunk.buffers():
if buffer:
p += buffer.size
return p
@sizeof.register(pa.Table)
def sizeof_pyarrow_table(table):
p = sizeof(table.schema.metadata)
for col in table.itercolumns():
p += _get_col_size(col)
return int(p) + 1000
@sizeof.register(pa.ChunkedArray)
def sizeof_pyarrow_chunked_array(data):
return int(_get_col_size(data)) + 1000
# Handle pa.Column for pyarrow < 0.15
if pa.__version__ < LooseVersion("0.15.0"):
@sizeof.register(pa.Column)
def sizeof_pyarrow_column(col):
return int(_get_col_size(col)) + 1000
| bsd-3-clause |
aymeric-spiga/planetoplot | tutorial/planetoplot_tutorial.py | 1 | 12257 |
# coding: utf-8
# # PLANETOPLOT tutorial
#
# *Author: [Aymeric SPIGA](http://www.lmd.jussieu.fr/~aslmd)*
#
# PLANETOPLOT is a plotting/mapping tool based on popular Python librairies. Here the great work done by the [matplotlib](http://matplotlib.org/) and [basemap](http://matplotlib.org/basemap) teams shall be acknowledged.
#
# The tool is [available on Github](https://github.com/aymeric-spiga/planetoplot) under a GNU GPL licence. Please refer to [this page for download instructions](https://github.com/aymeric-spiga/planetoplot/wiki).
#
# Initially I developed it to learn more about Python and to build a unified tool I could use for my research (and within our research team). I hope it will be useful to you as well. Note that I am a research scientist, not a computer programmer: this tool comes as is, without guarantee.
#
# Below is a quick tutorial (and a sample gallery) to discover the tool. Download [here](http://www.lmd.jussieu.fr/~aslmd/planetoplot/diagfired.nc) the file used in this tutorial (a 32M file with predictions from our Mars Global Climate Model!).
#
# This tutorial covers the three kinds of use of PLANETOPLOT:
# * As a quick and convenient [command line](#commandline) tool
# * As a versatile Python [library](#library) to use in your own scripts (or in `ipython`)
# * As a source of interesting libraries for your work -- in a [modular](#modular) approach
#
# Enjoy!
# <small>NB: If you have `ipython notebook` installed on your computer, download and use this tutorial interactively with the command `ipython notebook planetoplot_tutorial.ipynb`</small>
# In[1]:
# This line configures matplotlib to show figures embedded in the notebook,
# instead of opening a new window for each figure.
# If you are using an old version of IPython, try using '%pylab inline' instead.
get_ipython().magic(u'matplotlib inline')
# <a id='commandline'></a>
# ## 1. Quick and convenient: Use PLANETOPLOT as a command line tool
# You can use PLANETOPLOT as a simple command line interface.
#
# This allows you to easily and quickly plot a field saved in a netCDF file.
#
# ### Important general remarks for a start
#
# * First thing to do is to discover all the available options is
#
# `pp.py -h`
#
# Note that options related to reading fields are lower case while those related to plotting fields are upper case.
#
#
# * Assume you have a netCDF file named `diagfired.nc`, the command
#
# `pp.py diagfired.nc`
#
# will give you some information on the available variables and on the xyzt dimensions that the program is able to recognize in the netCDF file
#
#
# * The general use of the command is
#
# `pp.py [options] file(s)` or, equivalently, `pp.py file(s) [options]`
#
# To obtain the same plot side-by-side for two different files `file1.nc` and `file2.nc`
#
# `pp.py [options] file1.nc file2.nc`
#
# This works for multiple files. And, of course, regular expressions can be used, as well as automatic completion
#
# `pp.py [options] file*.nc`
# `pp.py [options] file?.nc`
# `pp.py [options] file[1-6].nc`
#
#
# * In any example that follows, adding the option `--verbose` makes the program describe what it is doing. This can be useful to understand how PLANETOPLOT works.
#
#
# * In any example that follows, the command can be saved to a txt file named `your_choice.sh` by using the option `-o your_choice`. This can be useful to store long commands for further reference.
#
#
# * In any example that follows, the figure is output through the great `matplotlib` GUI. To save the figure in one of the usual compressed or vector format, use the corresponding option: `-O png` or `-O jpg` or `-O eps` or `-O ps` or `-O svg` or `-O pdf`
#
#
# ### Tutorial examples
#
# A guided tutorial is worth a thousand words.
#
# Try the examples below with the file you just downloaded (if not, check out [here](http://www.lmd.jussieu.fr/~aslmd/planetoplot/diagfired.nc))
#
# *NB: in what follows, type each command in a terminal without `%run` (except if you use `ipython`)*
# **Example 1: Map a time-varying 2D variable tsurf by slicing at time value 0.9**
# In[3]:
get_ipython().magic(u'run pp.py diagfired.nc -v tsurf -t 0.9')
# ** Example 1bis: Same as Example 1 except use another [map projection](http://matplotlib.org/basemap/api/basemap_api.html) **
# *Robinson projection*
# In[3]:
get_ipython().magic(u'run pp.py diagfired.nc -v tsurf -t 0.9 -P robin')
# *Orthographic projection with point of view centered in longitude 90W and latitude 30N*
# In[4]:
get_ipython().magic(u'run pp.py diagfired.nc -v tsurf -t 0.9 -P ortho --blon -90 --blat 30')
# ** Example 1ter: Same as Example 1 except use another [colormap](http://matplotlib.org/examples/color/colormaps_reference.html) **
# In[5]:
get_ipython().magic(u'run pp.py diagfired.nc -v tsurf -t 0.9 -C hot')
# **Example 2: Map a time-varying 2D variable tsurf by slicing at several time values**
# In[6]:
get_ipython().magic(u'run pp.py diagfired.nc -v tsurf -t 0.5 -t 0.6 -t 0.7 -t 0.8')
# **Example 3: Reduce dimension further by slicing at longitude value 45 in addition to time value 0.9**
# In[7]:
get_ipython().magic(u'run pp.py diagfired.nc -v tsurf -t 0.9 -x 45')
# **Example 4: Superimpose two 1D slices at different time values**
# In[8]:
get_ipython().magic(u'run pp.py diagfired.nc -v tsurf -t 0.5 -t 0.7 -t 0.9 -x 45 -S')
# The color and legend for each curve can be set by using respectively the option `-Q` and `-E`
# In[9]:
get_ipython().magic(u'run pp.py diagfired.nc -v tsurf -t 0.5 -t 0.7 -t 0.9 -x 45 -S -Q red -E afternoon -Q purple -E evening -Q black -E night')
# ** Example 5: Obtain a yz section at longitude 0 and time value 0.9 of a time-varying 3D variable u **
# In[10]:
get_ipython().magic(u'run pp.py diagfired.nc -v u -t 0.9 -x 0')
# ** Example 6: Same as Example 5 except request a zonal average between -175° and 180° longitude **
# In[11]:
get_ipython().magic(u'run pp.py diagfired.nc -v u -t 0.9 -x -175,180')
# ** Example 7: Same as example 6 except request minimum values between -175° and 180° longitude **
# In[12]:
get_ipython().magic(u'run pp.py diagfired.nc -v u -t 0.9 -x -175,180 -u min')
# ** Example 8: Same as example 6 except change min and max displayed values, number of color levels, title, x-axis title, and y-title **
# In[13]:
get_ipython().magic(u'run pp.py diagfired.nc -v u -t 0.9 -x -175,180 -N -100 -M 100 -D 50 -T "Zonal mean for zonal wind $\\langle u \\rangle$" -X \'Latitude\' -Y \'Altitude (km)\'')
# ** Example 9: Same as example 8 except request two different 3D variables to be displayed with different settings **
# In[14]:
get_ipython().magic(u'run pp.py diagfired.nc -t 0.9 -x -175,180 -X \'Latitude\' -Y \'Altitude (km)\' -D 30 -F \'%.0f\' -v u -N -100 -M 100 -T "Zonal wind $\\langle u \\rangle$" -v temp -N 100 -M 250 -T "Temperature $\\langle T \\rangle$"')
# Note that we also used the option `-F` which imposes the format of values in the colorbar. Here we set floats without decimals, but any `python` format can be used.
# ** Example 10: Map temperature at altitude 120 km and time value 0.9 with wind vectors superimposed (two components: u and v) **
# In[15]:
get_ipython().magic(u'run pp.py diagfired.nc -v temp -z 120 -t 0.9 -i u -j v')
# <a id='library'></a>
# ## 2. Versatile and complete: Make PLANETOPLOT python scripts
# Line-by-line description
# * Script header
# * Import `pp` object from `ppclass`
# * Define a `pp` object named `ex`
# (and make it quiet)
# * Define attributes
# - netCDF file to read
# - variable to read
# - value on y axis
# - value on z axis
# * Get data from netCDF file + Make plot
# In[16]:
#! /usr/bin/env python
from ppclass import pp
ex = pp()
ex.quiet = True
ex.file = 'diagfired.nc'
ex.var = 'temp'
ex.y = 15.
ex.z = 75.
ex.getplot()
# Here is an example on how to customize this plot
# In[17]:
ex.title = r'Hovmoller plot for $T_{75km}$'
ex.xcoeff = 24.
ex.xlabel = 'Time (hours)'
ex.ylabel = 'Altitude (km)'
ex.vmin = 130.
ex.vmax = 170.
ex.div = 50
ex.fmt = '%.0f'
ex.colorbar = "spectral"
ex.plot() # remake plot
# The first example in the [command line](#commandline) section
#
# `pp.py diagfired.nc -v tsurf -t 0.9`
#
# is equivalent to the following (relatively minimalist) script
# In[18]:
#! /usr/bin/env python
from ppclass import pp
mini = pp()
mini.quiet = True
mini.file = "diagfired.nc"
mini.var = "tsurf"
mini.t = 0.9
mini.xp = 4 # this line is just added to make figure below smaller
mini.yp = 3 # this line is just added to make figure below smaller
mini.getplot()
# and this script is equivalent to the following one using single-line syntax
# In[19]:
#! /usr/bin/env python
from ppclass import pp
mini = pp(quiet=True,file="diagfired.nc",var="tsurf",t=0.9,xp=4,yp=3).getplot()
# <a id='modular'></a>
# ## 3. Modular and powerful: Use individual PLANETOPLOT components in your scripts
# ### Reading netCDF only
#
# In `ppclass`, the `pp()` class features methods, on the one hand, to retrieve fields from netCDF files and, on the other hand, to plot those fields. However, it is easy to use only the former capability and not the latter. This is often the case when your own plotting recipes are preferred, or simply that complex operations or data analysis/filtering are needed prior to displaying the fields. This is easily done with the method `getf()`.
#
# For instance, reading the field `icetot` in the file `diagfired.nc` at time value 0.5 takes only one line
# In[20]:
#! /usr/bin/env python
from ppclass import pp
icetot = pp(file="diagfired.nc",var="icetot",t=0.5).getf()
print icetot[10,10]
# A more complex and versatile use of the getf() method -- and getfl() for labelling -- is summarized in this [example script](https://github.com/aymeric-spiga/planetoplot/blob/master/examples/ppclass_additional/easy_get_field.py)
# ### Plotting only: the `ppplot` class
# The `ppplot` class can be used without the `ppclass` class. This is for instance the case if one wants to plot fields that are not stored in netCDF format, or simply to plot anything within a `python` script with a quick and convenient solution.
#
# In the following example script, using a set of librairies including `ppplot`
# In[21]:
#! /usr/bin/env python
import urllib
import numpy as np
import ppplot
# a radiosounding stored somewhere on the web is imported and saved in a local file `sounding.txt`
# In[22]:
url="https://raw.githubusercontent.com/aymeric-spiga/planetoplot/master/examples/ppplot/sounding.txt"
sounding = urllib.urlopen(url).read()
soundingfile = open('sounding.txt','w').write(sounding)
# and loaded using `numpy.loadtxt()`
# In[23]:
press,z_alt,temp,dew,hum,mix,wdir,wknot,thta,thte,thtv = np.loadtxt("sounding.txt",skiprows=8,unpack=True)
# before the variables can be easily displayed using `ppplot` through a one-line instruction
# In[24]:
ppplot.plot1d(f=temp).makeshow()
# A more elaborate plot can be obtained as well by setting more attributes for the `plot1d` class object
# In[25]:
sd = ppplot.plot1d()
sd.f = z_alt
sd.x = temp
sd.linestyle = '-'
sd.marker = '.'
sd.color = 'r'
sd.ycoeff = 1.e-3
sd.title = "A random terrestrial sounding"
sd.legend = "Fort Smith 00Z 26 Sep 2013"
sd.xlabel = "Temperature ($^{\circ}$C)"
sd.ylabel = "Altitude (km)"
sd.makeshow()
# The command line script `asciiplot.py` was created on this basis to plot something from an ASCII file containing columns. This script shares common options with the PLANETOPLOT `pp.py` command line script.
# In[26]:
get_ipython().magic(u'run asciiplot.py -h')
# Here is a command line example doing something similar to the above script
# In[27]:
get_ipython().magic(u'run asciiplot.py sounding.txt -s 8 -x 3 -y 2')
# For the exact same result as what we got with the above script, a few options can be added
# In[28]:
get_ipython().magic(u'run asciiplot.py sounding.txt -s 8 -x 3 -y 2 -L \'-\' -K \'.\' -Q \'r\' --ycoeff 1.e-3 -T "A random terrestrial sounding" -E "Fort Smith 00Z 26 Sep 2013" -X "Temperature ($^{\\circ}$C)" -Y "Altitude (km)"')
| gpl-2.0 |
milankl/swm | plot/spec_plot.py | 1 | 2949 | ## EKE SPEC PLOT
from __future__ import print_function
path1 = '/network/aopp/cirrus/pred/kloewer/swm_back_ronew/'
path2 = '/network/aopp/cirrus/pred/kloewer/swm_bf_cntrl/data/'
outpath = '/network/home/aopp/kloewer/swm/paperplot/'
import os; os.chdir(path2) # change working directory
import numpy as np
from scipy import sparse
import time as tictoc
from netCDF4 import Dataset
import glob
import matplotlib.pyplot as plt
from cmocean import cm
plt.rcParams['mathtext.fontset'] = 'cm'
plt.rcParams['mathtext.rm'] = 'serif'
# OPTIONS
runfolder = [0,6,0,3,8]
print('Compare ek spec plots from run ' + str(runfolder))
runpath1 = path2+'run%04i' % runfolder[0]
D1 = np.load(runpath1+'/analysis/spec_eke.npy').all()
param1 = np.load(runpath1+'/param.npy').all()
runpath2 = path2+'run%04i' % runfolder[1]
D2 = np.load(runpath2+'/analysis/spec_eke.npy').all()
param2 = np.load(runpath2+'/param.npy').all()
runpath3 = path1+'run%04i' % runfolder[2]
D3 = np.load(runpath3+'/analysis/spec_eke.npy').all()
param3 = np.load(runpath3+'/param.npy').all()
runpath4 = path1+'run%04i' % runfolder[3]
D4 = np.load(runpath4+'/analysis/spec_eke.npy').all()
param4 = np.load(runpath4+'/param.npy').all()
runpath5 = path1+'run%04i' % runfolder[4]
D5 = np.load(runpath5+'/analysis/spec_eke.npy').all()
param5 = np.load(runpath5+'/param.npy').all()
Ro_max = param1['c_phase']/(param1['f_0'] - param1['beta']*param1['Ly']/2.)
Ro_min = param1['c_phase']/(param1['f_0'] + param1['beta']*param1['Ly']/2.)
## PLOTTING
kf = 1e3
fig,ax = plt.subplots(1,1,figsize=(7,5))
ax.loglog(D1['k']*kf,D1['p']/(2*np.pi),'C0',label=r'Low resolution $\Delta x = $30km',lw=2)
ax.loglog(D2['k']*kf,D2['p']/(2*np.pi),'C2',label=r'High resolution $\Delta x = $7.5km',lw=2)
ax.loglog(D3['k']*kf,D3['p']/(2*np.pi),'C1',label=r'LR + weak backscatter',ls='--')
ax.loglog(D4['k']*kf,D4['p']/(2*np.pi),'C3',label=r'LR + moderate backscatter',ls='--')
ax.loglog(D5['k']*kf,D5['p']/(2*np.pi),'C5',label=r'LR + strong backscatter',ls='--')
ylim = ax.get_ylim()
ax.loglog(kf/Ro_max*np.ones(2),ylim,'k',lw=0.5)
ax.loglog(kf/Ro_min*np.ones(2),ylim,'k',lw=0.5)
ax.text(1/1900,5e-1,'$L_{Ro}^{max}$',rotation=90,fontsize=12)
ax.text(1/590,5e-1,'$L_{Ro}^{min}$',rotation=90,fontsize=12)
k1 = D1['k'][7]*kf
k2 = D1['k'][-1]*kf
s = 2e-4
ax.loglog([k1,k2],[s*k1**(-3),s*k2**(-3)],'C4',label=r'$K^{-3}$')
xtick = np.array([4000,2000,1000,500,200,100,50,20,10])
xtickm = np.array([3000,900,800,700,600,400,300,90,80,70,60,40,30])
ax.set_xticks(1./xtick)
ax.set_xticks(1./xtickm,minor=True)
ax.set_xticklabels([],minor=True)
ax.set_xticklabels(xtick)
ax.set_xlim(D1['k'][1]*kf,1./xtick[-1])
ax.set_ylim(*ylim)
ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=90)
ax.set_ylabel('EKE [m$^3$s$^{-2}$]')
ax.set_xlabel('wavelength [km]')
ax.legend(loc=1)
ax.set_title('Eddy kinetic energy spectrum',loc='left')
plt.tight_layout()
plt.savefig(outpath+'plots/spec_eke.eps')
plt.close(fig) | gpl-3.0 |
wasit7/book_pae | pae/final_code/src/convert_allsub_tojson.py | 2 | 1651 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 23:20:29 2016
@author: Methinee
"""
import pandas as pd
import json
from collections import defaultdict
countEachSubSort = 0
key_sub_sort = defaultdict(list)
subjects = []
countSub = 0
node = []
link= []
out={}
sources=[]
targets=[]
df_file = pd.read_csv('../data/df_dropSub_less20.csv',delimiter=",", skip_blank_lines = True,
error_bad_lines=False,encoding='utf8')
headers=list(df_file.columns.values)
for sub in df_file['3COURSEID']:
if sub not in subjects:
subjects.append(sub)
# print "%s, index is %d"%(sub,subjects.index(sub))
countSub = countSub+1
node.append({"name":sub})
subjects.remove('CS231')
node.remove({"name":'CS231'})
subjects.sort()
node.sort()
# Find index of source and target from book/graph1.gv
df_st = pd.read_csv('../data/source-target_predict.csv',delimiter=";", skip_blank_lines = True,
error_bad_lines=False)
headers_st=list(df_st.columns.values)
df_st = df_st.dropna()
for source in df_st[headers_st[0]]:
#print "source is %s, index is %d"%(source,subjects_db.index(source))
sources.append(subjects.index(source))
for target in df_st[headers_st[1]]:
#print "target is %s, index is %d"%(target,subjects_db.index(target))
targets.append(subjects.index(target))
for i in xrange(0,62): # the Bachelor curriculum has 70 links
link.append({"source":sources[i],"target":targets[i],"type": "licensing"})
out["node"]=node
out["link"]=link
#with open("subjects_111.json","w") as outfile:
# json.dump(out,outfile,sort_keys=True, indent=4, separators=(',',': '))
| mit |
fabioticconi/scikit-learn | sklearn/tests/test_isotonic.py | 11 | 13284 | import warnings
import numpy as np
import pickle
import copy
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
# check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [1, 1, 2, 3, 4, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
y_ = ir.fit_transform(x, y)
# work-around for pearson divide warnings in scipy <= 0.17.0
assert_true(all(["invalid value encountered in "
in str(warn.message) for warn in w]))
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
    # Set y and x for increasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
y_ = ir.fit_transform(x, y)
# work-around for pearson divide warnings in scipy <= 0.17.0
assert_true(all(["invalid value encountered in "
in str(warn.message) for warn in w]))
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
def test_fast_predict():
# test that the faster prediction change doesn't
# affect out-of-sample predictions:
# https://github.com/scikit-learn/scikit-learn/pull/6206
rng = np.random.RandomState(123)
n_samples = 10 ** 3
# X values over the -10,10 range
X_train = 20.0 * rng.rand(n_samples) - 10
y_train = np.less(
rng.rand(n_samples),
1.0 / (1.0 + np.exp(-X_train))
).astype('int64')
weights = rng.rand(n_samples)
# we also want to test that everything still works when some weights are 0
weights[rng.rand(n_samples) < 0.1] = 0
slow_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
fast_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
# Build interpolation function with ALL input data, not just the
# non-redundant subset. The following 2 lines are taken from the
# .fit() method, without removing unnecessary points
X_train_fit, y_train_fit = slow_model._build_y(X_train, y_train,
sample_weight=weights,
trim_duplicates=False)
slow_model._build_f(X_train_fit, y_train_fit)
# fit with just the necessary data
fast_model.fit(X_train, y_train, sample_weight=weights)
X_test = 20.0 * rng.rand(n_samples) - 10
y_pred_slow = slow_model.predict(X_test)
y_pred_fast = fast_model.predict(X_test)
assert_array_equal(y_pred_slow, y_pred_fast)
def test_isotonic_copy_before_fit():
# https://github.com/scikit-learn/scikit-learn/issues/6628
ir = IsotonicRegression()
copy.copy(ir)
| bsd-3-clause |
ibis-project/ibis | ibis/backends/dask/tests/execution/test_join.py | 1 | 16060 | import dask.dataframe as dd
import pandas as pd
import pytest
from dask.dataframe.utils import tm
from pandas import Timedelta, date_range
from pytest import param
import ibis
import ibis.common.exceptions as com
# Note - computations in this file use the single-threaded scheduler (instead
# of the default multithreaded scheduler) in order to avoid a flaky interaction
# between dask and pandas in merges. There is evidence this has been fixed in
# pandas>=1.1.2 (or in other schedulers). For more background see:
# - https://github.com/dask/dask/issues/6454
# - https://github.com/dask/dask/issues/5060
pytestmark = pytest.mark.dask
join_type = pytest.mark.parametrize(
'how',
[
'inner',
'left',
'right',
'outer',
param(
'semi',
marks=pytest.mark.xfail(
raises=NotImplementedError, reason='Semi join not implemented'
),
),
param(
'anti',
marks=pytest.mark.xfail(
raises=NotImplementedError, reason='Anti join not implemented'
),
),
],
)
@join_type
def test_join(how, left, right, df1, df2):
expr = left.join(right, left.key == right.key, how=how)[
left, right.other_value, right.key3
]
result = expr.compile()
expected = dd.merge(df1, df2, how=how, on='key')
tm.assert_frame_equal(
result[expected.columns].compute(scheduler='single-threaded'),
expected.compute(scheduler='single-threaded'),
)
def test_cross_join(left, right, df1, df2):
expr = left.cross_join(right)[left, right.other_value, right.key3]
result = expr.compile()
expected = dd.merge(
df1.assign(dummy=1), df2.assign(dummy=1), how='inner', on='dummy'
).rename(columns={'key_x': 'key'})
del expected['dummy'], expected['key_y']
tm.assert_frame_equal(
result[expected.columns].compute(scheduler='single-threaded'),
expected.compute(scheduler='single-threaded'),
)
@join_type
def test_join_project_left_table(how, left, right, df1, df2):
expr = left.join(right, left.key == right.key, how=how)[left, right.key3]
result = expr.compile()
expected = dd.merge(df1, df2, how=how, on='key')[
list(left.columns) + ['key3']
]
tm.assert_frame_equal(
result[expected.columns].compute(scheduler='single-threaded'),
expected.compute(scheduler='single-threaded'),
)
def test_cross_join_project_left_table(left, right, df1, df2):
expr = left.cross_join(right)[left, right.key3]
result = expr.compile()
expected = dd.merge(
df1.assign(dummy=1), df2.assign(dummy=1), how='inner', on='dummy'
).rename(columns={'key_x': 'key'})[list(left.columns) + ['key3']]
tm.assert_frame_equal(
result[expected.columns].compute(scheduler='single-threaded'),
expected.compute(scheduler='single-threaded'),
)
@join_type
def test_join_with_multiple_predicates(how, left, right, df1, df2):
expr = left.join(
right, [left.key == right.key, left.key2 == right.key3], how=how
)[left, right.key3, right.other_value]
result = expr.compile()
expected = dd.merge(
df1, df2, how=how, left_on=['key', 'key2'], right_on=['key', 'key3']
).reset_index(drop=True)
tm.assert_frame_equal(
result[expected.columns].compute(scheduler='single-threaded'),
expected.compute(scheduler='single-threaded'),
)
@join_type
def test_join_with_multiple_predicates_written_as_one(
how, left, right, df1, df2
):
predicate = (left.key == right.key) & (left.key2 == right.key3)
expr = left.join(right, predicate, how=how)[
left, right.key3, right.other_value
]
result = expr.compile()
expected = dd.merge(
df1, df2, how=how, left_on=['key', 'key2'], right_on=['key', 'key3']
).reset_index(drop=True)
tm.assert_frame_equal(
result[expected.columns].compute(scheduler='single-threaded'),
expected.compute(scheduler='single-threaded'),
)
@join_type
def test_join_with_invalid_predicates(how, left, right):
predicate = (left.key == right.key) & (left.key2 <= right.key3)
expr = left.join(right, predicate, how=how)
with pytest.raises(TypeError):
expr.compile()
predicate = left.key >= right.key
expr = left.join(right, predicate, how=how)
with pytest.raises(TypeError):
expr.compile()
@join_type
@pytest.mark.xfail(reason='Hard to detect this case')
def test_join_with_duplicate_non_key_columns(how, left, right, df1, df2):
left = left.mutate(x=left.value * 2)
right = right.mutate(x=right.other_value * 3)
expr = left.join(right, left.key == right.key, how=how)
# This is undefined behavior because `x` is duplicated. This is difficult
# to detect
with pytest.raises(ValueError):
expr.compile()
@join_type
def test_join_with_duplicate_non_key_columns_not_selected(
how, left, right, df1, df2
):
left = left.mutate(x=left.value * 2)
right = right.mutate(x=right.other_value * 3)
right = right[['key', 'other_value']]
expr = left.join(right, left.key == right.key, how=how)[
left, right.other_value
]
result = expr.compile()
expected = dd.merge(
df1.assign(x=df1.value * 2),
df2[['key', 'other_value']],
how=how,
on='key',
)
tm.assert_frame_equal(
result[expected.columns].compute(scheduler='single-threaded'),
expected.compute(scheduler='single-threaded'),
)
@join_type
def test_join_with_post_expression_selection(how, left, right, df1, df2):
join = left.join(right, left.key == right.key, how=how)
expr = join[left.key, left.value, right.other_value]
result = expr.compile()
expected = dd.merge(df1, df2, on='key', how=how)[
['key', 'value', 'other_value']
]
tm.assert_frame_equal(
result[expected.columns].compute(scheduler='single-threaded'),
expected.compute(scheduler='single-threaded'),
)
@join_type
def test_join_with_post_expression_filter(how, left):
lhs = left[['key', 'key2']]
rhs = left[['key2', 'value']]
joined = lhs.join(rhs, 'key2', how=how)
projected = joined[lhs, rhs.value]
expr = projected[projected.value == 4]
result = expr.compile()
df1 = lhs.compile()
df2 = rhs.compile()
expected = dd.merge(df1, df2, on='key2', how=how)
expected = expected.loc[expected.value == 4].reset_index(drop=True)
tm.assert_frame_equal(
result.compute(scheduler='single-threaded'),
expected.compute(scheduler='single-threaded'),
)
@join_type
def test_multi_join_with_post_expression_filter(how, left, df1):
lhs = left[['key', 'key2']]
rhs = left[['key2', 'value']]
rhs2 = left[['key2', 'value']].relabel({'value': 'value2'})
joined = lhs.join(rhs, 'key2', how=how)
projected = joined[lhs, rhs.value]
filtered = projected[projected.value == 4]
joined2 = filtered.join(rhs2, 'key2')
projected2 = joined2[filtered.key, rhs2.value2]
expr = projected2[projected2.value2 == 3]
result = expr.compile()
df1 = lhs.compile()
df2 = rhs.compile()
df3 = rhs2.compile()
expected = dd.merge(df1, df2, on='key2', how=how)
expected = expected.loc[expected.value == 4].reset_index(drop=True)
expected = dd.merge(expected, df3, on='key2')[['key', 'value2']]
expected = expected.loc[expected.value2 == 3].reset_index(drop=True)
tm.assert_frame_equal(
result.compute(scheduler='single-threaded'),
expected.compute(scheduler='single-threaded'),
)
@pytest.mark.xfail(reason="TODO - execute_materialized_join - #2553")
@join_type
def test_join_with_non_trivial_key(how, left, right, df1, df2):
# also test that the order of operands in the predicate doesn't matter
join = left.join(right, right.key.length() == left.key.length(), how=how)
expr = join[left.key, left.value, right.other_value]
result = expr.compile()
expected = (
dd.merge(
df1.assign(key_len=df1.key.str.len()),
df2.assign(key_len=df2.key.str.len()),
on='key_len',
how=how,
)
.drop(['key_len', 'key_y', 'key2', 'key3'], axis=1)
.rename(columns={'key_x': 'key'})
)
tm.assert_frame_equal(
result[expected.columns].compute(scheduler='single-threaded'),
expected.compute(scheduler='single-threaded'),
)
@pytest.mark.xfail(reason="TODO - execute_materialized_join - #2553")
@join_type
def test_join_with_non_trivial_key_project_table(how, left, right, df1, df2):
# also test that the order of operands in the predicate doesn't matter
join = left.join(right, right.key.length() == left.key.length(), how=how)
expr = join[left, right.other_value]
expr = expr[expr.key.length() == 1]
result = expr.compile()
expected = (
dd.merge(
df1.assign(key_len=df1.key.str.len()),
df2.assign(key_len=df2.key.str.len()),
on='key_len',
how=how,
)
.drop(['key_len', 'key_y', 'key2', 'key3'], axis=1)
.rename(columns={'key_x': 'key'})
)
expected = expected.loc[expected.key.str.len() == 1]
tm.assert_frame_equal(
result[expected.columns].compute(scheduler='single-threaded'),
expected.compute(scheduler='single-threaded'),
)
@join_type
def test_join_with_project_right_duplicate_column(client, how, left, df1, df3):
# also test that the order of operands in the predicate doesn't matter
right = client.table('df3')
join = left.join(right, ['key'], how=how)
expr = join[left.key, right.key2, right.other_value]
result = expr.compile()
expected = (
dd.merge(df1, df3, on='key', how=how)
.drop(['key2_x', 'key3', 'value'], axis=1)
.rename(columns={'key2_y': 'key2'})
)
tm.assert_frame_equal(
result[expected.columns].compute(scheduler='single-threaded'),
expected.compute(scheduler='single-threaded'),
)
def test_join_with_window_function(
players_base, players_df, batting, batting_df
):
players = players_base
# this should be semi_join
tbl = batting.left_join(players, ['playerID'])
t = tbl[batting.G, batting.playerID, batting.teamID]
expr = t.groupby(t.teamID).mutate(
team_avg=lambda d: d.G.mean(),
demeaned_by_player=lambda d: d.G - d.G.mean(),
)
result = expr.compile()
expected = dd.merge(
batting_df, players_df[['playerID']], on='playerID', how='left'
)[['G', 'playerID', 'teamID']]
team_avg = expected.groupby('teamID').G.transform('mean')
expected = expected.assign(
team_avg=team_avg, demeaned_by_player=lambda df: df.G - team_avg
)
tm.assert_frame_equal(
result[expected.columns].compute(scheduler='single-threaded'),
expected.compute(scheduler='single-threaded'),
)
merge_asof_minversion = pytest.mark.skipif(
pd.__version__ < '0.19.2',
reason="at least pandas-0.19.2 required for merge_asof",
)
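# Note (added for clarity, not part of the original test suite): merge_asof
# matches each left row with the most recent right row whose 'on' value is
# less than or equal to its own, e.g. with plain pandas frames:
#
#     left_t = pd.DataFrame({'time': pd.to_datetime(['2018-01-02'])})
#     right_t = pd.DataFrame({'time': pd.to_datetime(['2018-01-01']),
#                             'other_value': [1.0]})
#     pd.merge_asof(left_t, right_t, on='time')  # matched row: other_value == 1.0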
@merge_asof_minversion
def test_asof_join(time_left, time_right, time_df1, time_df2):
expr = time_left.asof_join(time_right, 'time')[
time_left, time_right.other_value
]
result = expr.compile()
expected = dd.merge_asof(time_df1, time_df2, on='time')
tm.assert_frame_equal(
result[expected.columns].compute(scheduler='single-threaded'),
expected.compute(scheduler='single-threaded'),
)
@merge_asof_minversion
def test_asof_join_predicate(time_left, time_right, time_df1, time_df2):
expr = time_left.asof_join(time_right, time_left.time == time_right.time)[
time_left, time_right.other_value
]
result = expr.compile()
expected = dd.merge_asof(time_df1, time_df2, on='time')
tm.assert_frame_equal(
result[expected.columns].compute(scheduler='single-threaded'),
expected.compute(scheduler='single-threaded'),
)
@merge_asof_minversion
def test_keyed_asof_join(
time_keyed_left, time_keyed_right, time_keyed_df1, time_keyed_df2
):
expr = time_keyed_left.asof_join(time_keyed_right, 'time', by='key')[
time_keyed_left, time_keyed_right.other_value
]
result = expr.compile()
expected = dd.merge_asof(
time_keyed_df1, time_keyed_df2, on='time', by='key'
)
tm.assert_frame_equal(
result[expected.columns].compute(scheduler='single-threaded'),
expected.compute(scheduler='single-threaded'),
)
@merge_asof_minversion
def test_keyed_asof_join_with_tolerance(
time_keyed_left, time_keyed_right, time_keyed_df1, time_keyed_df2
):
expr = time_keyed_left.asof_join(
time_keyed_right, 'time', by='key', tolerance=2 * ibis.interval(days=1)
)[time_keyed_left, time_keyed_right.other_value]
result = expr.compile()
expected = dd.merge_asof(
time_keyed_df1,
time_keyed_df2,
on='time',
by='key',
tolerance=Timedelta('2D'),
)
tm.assert_frame_equal(
result[expected.columns].compute(scheduler='single-threaded'),
expected.compute(scheduler='single-threaded'),
)
@pytest.mark.parametrize(
"how",
[
"left",
pytest.param(
"right",
marks=pytest.mark.xfail(
raises=AttributeError, reason="right_join is not an ibis API"
),
),
"inner",
"outer",
],
)
@pytest.mark.parametrize(
"func",
[
pytest.param(lambda join: join["a0", "a1"], id="tuple"),
pytest.param(lambda join: join[["a0", "a1"]], id="list"),
pytest.param(lambda join: join.select(["a0", "a1"]), id="select"),
],
)
@pytest.mark.xfail(
raises=(com.IbisError, AttributeError),
reason="Select from unambiguous joins not implemented",
)
def test_select_on_unambiguous_join(how, func, npartitions):
df_t = dd.from_pandas(
pd.DataFrame({'a0': [1, 2, 3], 'b1': list("aab")}),
npartitions=npartitions,
)
df_s = dd.from_pandas(
pd.DataFrame({'a1': [2, 3, 4], 'b2': list("abc")}),
npartitions=npartitions,
)
con = ibis.dask.connect({"t": df_t, "s": df_s})
t = con.table("t")
s = con.table("s")
method = getattr(t, "{}_join".format(how))
join = method(s, t.b1 == s.b2)
expected = dd.merge(df_t, df_s, left_on=["b1"], right_on=["b2"], how=how)[
["a0", "a1"]
]
assert not expected.compute(scheduler='single-threaded').empty
expr = func(join)
result = expr.compile()
tm.assert_frame_equal(
result.compute(scheduler='single-threaded'),
expected.compute(scheduler='single-threaded'),
)
@pytest.mark.parametrize(
"func",
[
pytest.param(lambda join: join["a0", "a1"], id="tuple"),
pytest.param(lambda join: join[["a0", "a1"]], id="list"),
pytest.param(lambda join: join.select(["a0", "a1"]), id="select"),
],
)
@pytest.mark.xfail(
raises=(com.IbisError, AttributeError),
reason="Select from unambiguous joins not implemented",
)
@merge_asof_minversion
def test_select_on_unambiguous_asof_join(func, npartitions):
df_t = dd.from_pandas(
pd.DataFrame(
{'a0': [1, 2, 3], 'b1': date_range("20180101", periods=3)}
),
npartitions=npartitions,
)
df_s = dd.from_pandas(
pd.DataFrame(
{'a1': [2, 3, 4], 'b2': date_range("20171230", periods=3)}
),
npartitions=npartitions,
)
con = ibis.dask.connect({"t": df_t, "s": df_s})
t = con.table("t")
s = con.table("s")
join = t.asof_join(s, t.b1 == s.b2)
expected = dd.merge_asof(df_t, df_s, left_on=["b1"], right_on=["b2"])[
["a0", "a1"]
]
assert not expected.compute(scheduler='single-threaded').empty
expr = func(join)
result = expr.compile()
tm.assert_frame_equal(
result.compute(scheduler='single-threaded'),
expected.compute(scheduler='single-threaded'),
)
| apache-2.0 |
rvraghav93/scikit-learn | sklearn/feature_extraction/text.py | 6 | 53305 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
normalized = unicodedata.normalize('NFKD', s)
if normalized == s:
return s
else:
return ''.join([c for c in normalized if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
if min_n == 1:
# no need to do any slicing for unigrams
# just iterate through the original tokens
tokens = list(original_tokens)
min_n += 1
else:
tokens = []
n_original_tokens = len(original_tokens)
# bind method outside of loop to reduce overhead
tokens_append = tokens.append
space_join = " ".join
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens_append(space_join(original_tokens[i: i + n]))
return tokens
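    # Worked example (added for clarity, not in the original module): with
    # ngram_range=(1, 2), the tokens ['please', 'call', 'me'] come back as
    # ['please', 'call', 'me', 'please call', 'call me'] -- the unigrams are
    # kept as-is, then each contiguous bigram is appended.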
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
min_n, max_n = self.ngram_range
if min_n == 1:
# no need to do any slicing for unigrams
# iterate through the string
ngrams = list(text_document)
min_n += 1
else:
ngrams = []
# bind method outside of loop to reduce overhead
ngrams_append = ngrams.append
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams_append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
operating only inside word boundaries. n-grams at the edges
of words are padded with space."""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
# bind method outside of loop to reduce overhead
ngrams_append = ngrams.append
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams_append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams_append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
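    # Worked example (added for clarity, not in the original module): for the
    # document "hi you" with ngram_range=(2, 2), each word is padded with a
    # space on both sides before slicing, so the result is
    # [' h', 'hi', 'i ', ' y', 'yo', 'ou', 'u '].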
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
        check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
class HashingVectorizer(BaseEstimator, VectorizerMixin, TransformerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
    - it is very low memory and scalable to large datasets as there is no
      need to store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, which are analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary : boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
alternate_sign : boolean, optional, default True
When True, an alternating sign is added to the features as to
approximately conserve the inner product in the hashed space even for
small n_features. This approach is similar to sparse random projection.
.. versionadded:: 0.19
non_negative : boolean, optional, default False
When True, an absolute value is applied to the features matrix prior to
returning it. When used in conjunction with alternate_sign=True, this
significantly reduces the inner product preservation property.
.. deprecated:: 0.19
This option will be removed in 0.21.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', alternate_sign=True,
non_negative=False, dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.alternate_sign = alternate_sign
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
if isinstance(X, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._get_hasher().fit(X, y=y)
return self
def transform(self, X):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
if isinstance(X, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
alternate_sign=self.alternate_sign,
non_negative=self.non_negative)
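# Minimal usage sketch (added for documentation, not part of the original
# scikit-learn module): HashingVectorizer is stateless, so transform() can be
# called without fitting.
def _example_hashing_vectorizer():
    corpus = ['This is the first document.', 'This is the second document.']
    vectorizer = HashingVectorizer(n_features=2 ** 10, norm=None)
    X = vectorizer.transform(corpus)
    # X is a scipy.sparse matrix with a fixed number of hashed columns; some
    # entries may be negative because of the alternating sign.
    assert X.shape == (2, 1024)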
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return np.bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
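# Worked example (added for documentation, not part of the original
# scikit-learn module): the first feature below is non-zero in both rows
# (documents), the second in only one.
def _example_document_frequency():
    X = sp.csr_matrix([[1, 0], [2, 3]])
    assert list(_document_frequency(X)) == [2, 1]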
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.csr_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
vocabulary[term] = new_val
map_index[old_val] = new_val
X.indices = map_index.take(X.indices, mode='clip')
return X
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
        Prune features that are nonzero in more documents than high or in
        fewer documents than low, modifying the vocabulary and restricting
        it to at most the limit most frequent features.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = []
indptr = _make_int_array()
values = _make_int_array()
indptr.append(0)
for doc in raw_documents:
feature_counter = {}
for feature in analyze(doc):
try:
feature_idx = vocabulary[feature]
if feature_idx not in feature_counter:
feature_counter[feature_idx] = 1
else:
feature_counter[feature_idx] += 1
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
j_indices.extend(feature_counter.keys())
values.extend(feature_counter.values())
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = np.asarray(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.frombuffer(values, dtype=np.intc)
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sort_indices()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
if isinstance(raw_documents, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if isinstance(raw_documents, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
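# Minimal usage sketch (added for documentation, not part of the original
# scikit-learn module): fit_transform learns the vocabulary and returns the
# sparse document-term count matrix in one pass.
def _example_count_vectorizer():
    corpus = ['the cat sat', 'the cat sat on the mat']
    vectorizer = CountVectorizer()
    X = vectorizer.fit_transform(corpus)
    # Feature names are sorted alphabetically.
    assert vectorizer.get_feature_names() == ['cat', 'mat', 'on', 'sat', 'the']
    # Dense counts per document:
    #   [[1, 0, 0, 1, 1],
    #    [1, 1, 1, 1, 2]]
    assert X.toarray().tolist() == [[1, 0, 0, 1, 1], [1, 1, 1, 1, 2]]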
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The formula that is used to compute the tf-idf of term t is
tf-idf(d, t) = tf(t) * idf(d, t), and the idf is computed as
idf(d, t) = log [ n / df(d, t) ] + 1 (if ``smooth_idf=False``),
where n is the total number of documents and df(d, t) is the
document frequency; the document frequency is the number of documents d
that contain term t. The effect of adding "1" to the idf in the equation
above is that terms with zero idf, i.e., terms that occur in all documents
in a training set, will not be entirely ignored.
(Note that the idf formula above differs from the standard
textbook notation that defines the idf as
idf(d, t) = log [ n / (df(d, t) + 1) ]).
If ``smooth_idf=True`` (the default), the constant "1" is added to the
numerator and denominator of the idf as if an extra document was seen
containing every term in the collection exactly once, which prevents
zero divisions: idf(d, t) = log [ (1 + n) / (1 + df(d, t)) ] + 1.
Furthermore, the formulas used to compute tf and idf depend
on parameter settings that correspond to the SMART notation used in IR
as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when
``sublinear_tf=True``.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when ``norm='l2'``, "n" (none)
when ``norm=None``.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schütze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf, diags=0, m=n_features,
n=n_features, format='csr')
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
# if _idf_diag is not set, this will raise an attribute error,
# which means hasattr(self, "idf_") is False
return np.ravel(self._idf_diag.sum(axis=0))
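# Worked example (added for documentation, not part of the original
# scikit-learn module): with the default smooth_idf=True and the counts
# below, idf = log((1 + n) / (1 + df)) + 1, i.e. [log(3/3) + 1, log(3/2) + 1].
def _example_tfidf_transformer_idf():
    counts = np.array([[1, 0], [1, 1]])
    transformer = TfidfTransformer(norm=None).fit(counts)
    # First term occurs in both documents -> idf == 1.0; second term occurs
    # in one document -> idf == log(1.5) + 1 (about 1.405).
    assert np.allclose(transformer.idf_, [1.0, np.log(1.5) + 1.0])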
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
        Tokenize the documents, count the occurrences of each token and
        return them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
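# Minimal usage sketch (added for documentation, not part of the original
# scikit-learn module): TfidfVectorizer bundles CountVectorizer and
# TfidfTransformer into a single estimator.
def _example_tfidf_vectorizer():
    corpus = ['the cat sat', 'the dog sat']
    vectorizer = TfidfVectorizer()
    X = vectorizer.fit_transform(corpus)
    assert vectorizer.get_feature_names() == ['cat', 'dog', 'sat', 'the']
    # Each row is an l2-normalized tf-idf vector over the 4 vocabulary terms.
    assert X.shape == (2, 4)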
| bsd-3-clause |
kayarre/Tools | hist/create_stack.py | 1 | 6412 | import os
import pandas as pd
# import numpy as np
import SimpleITK as sitk
import networkx as nx
import pickle
import copy
# import itk
import matplotlib.pyplot as plt
# from ipywidgets import interact, fixed
# from IPython.display import clear_output
import logging
logging.basicConfig(level=logging.WARNING)
# logging.basicConfig(level=logging.DEBUG)
import utils
def main():
  # this registers the cropped images
# df_path = "/Volumes/SD/caseFiles/vwi_proj/process_df.pkl"
# crop_dir = '/Volumes/SD/caseFiles/vwi_proc'
# csv = pd.read_csv(os.path.join(crop_dir, "case_1.csv"))
# df = pd.read_pickle(os.path.join(crop_dir, "case_1.pkl"))
case_file = "case_1.pkl"
reference_index = 0
top_dir = "/Volumes/SD/caseFiles"
#top_dir = "/media/store/krs/caseFiles"
# top_dir = "/media/sansomk/510808DF6345C808/caseFiles"
df = pd.read_pickle(os.path.join(top_dir, case_file))
n_rows = len(df.index)
# print(df.head())
#relabel_paths = True
#in_dir = "vwi_proj"
#out_dir = "vwi_proc"
trans_dir = "vwi_trans"
#image_dir = "images"
#test_dir = "test"
resample_dir = "resample"
#mask_dir = "masks"
#print(df.head())
#print(df.columns)
# print(df["Image_ID"].values.dtype)
# save the registration data
reg_path = os.path.join(top_dir, case_file.split(".")[0] + "_reg_data.pkl" )
reg_n = pickle.load(open(reg_path, "rb"))
graph_name = case_file.split(".")[0] + "_2.gpkl"
graph_path = os.path.join(top_dir, graph_name)
G_read = nx.read_gpickle(graph_path)
G = G_read.copy()
for n1, n2, d in G.edges(data=True):
#print(n1,n2)
# print(d["transform_file_name"])
transform_path = os.path.join(top_dir, d["transform_file_name"])
trans_ = sitk.ReadTransform(transform_path)
G.edges[(n1,n2)]["transform"] = trans_
# print("yo", trans_.GetDimension(), trans_.GetFixedParameters(),
# trans_.GetParameters(), trans_.GetName())
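  # Note (added for clarity, not in the original script): each graph edge now
  # carries the pairwise sitk transform between neighbouring slices; below,
  # the edge transforms along the path reference_index -> j are chained into
  # a single composite transform, roughly:
  #
  #     composite = sitk.Transform(2, sitk.sitkEuler)
  #     for t in transforms_along_path:   # hypothetical list from the graph
  #         composite.AddTransform(t)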
writer = sitk.ImageFileWriter()
# TODO make another script that can generate this from saved transforms and graph
ref_key = (reference_index, reference_index)
reg_dict = {}
for j in range(n_rows):
print(j)
data = df.iloc[j]
# j is the moving image
trans_list, trans_chain = utils._calculate_composite(G, reference_index, j)
print(trans_chain)
# Instantiate composite transform which will handle all the partial
# transformations.
composite_transform = sitk.Transform(2, sitk.sitkEuler)
#composite_transform = sitk.Transform(2, sitk.sitkComposite)
# Fill the composite transformation with the partial transformations:
for transform in trans_list:
#print(transform)
# print("test", transform.GetDimension(), transform.GetFixedParameters(),
# transform.GetParameters(), transform.GetName())
transform.FlattenTransform()
composite_transform.AddTransform(transform)
#print(dir(composite_transform))
#print(help(composite_transform.FlattenTransform))
#print(help(composite_transform))
#quit()
composite_transform.FlattenTransform()
# if ( j > 2):
# print(composite_transform)
# quit()
# print("yo", composite_transform.GetDimension(), composite_transform.GetFixedParameters(),
# composite_transform.GetParameters(), composite_transform.GetName())
reg_key = (reference_index, j)
#test_chain = utils._get_transformation_chain(G, reference_index, j)
print("reg key", reg_key)
#print(test_chain)
if (reg_key in [ref_key]):
# need hack to get a registration with the reference image in it
# if this doesn't work then you have big problems
for key in reg_n.keys():
if (reg_key[0] == key[0] ):
base_key = key
break
print("base key", base_key)
f_sitk = utils.read_1_tiff_image(reg_n[base_key], page_index = 5)
#print(f_sitk.GetSize())
new_image = utils.resample_1_rgb(composite_transform,
f_sitk,
mean = utils.get_mean_edges(f_sitk) )
resample_im = "resample_affine_{0}.png".format(j)
resample_path = os.path.join(top_dir, resample_dir, resample_im)
print(resample_path)
writer.SetFileName(resample_path)
writer.Execute(new_image)
reg_dict[reg_key] = dict(png_name = resample_im)
else:
#key_test = tuple(reversed(test_chain[0]))
#key_test = tuple(test_chain[0])
key_test = tuple(trans_chain[-1])
print("the moving image", key_test)
#f_sitk = utils.read_1_tiff_image(reg_n[key_test], page_index = 5)
t_sitk = utils.read_1_tiff_image_moving(reg_n[key_test], page_index = 5)
#print(reg_n[key_test])
#print(t_sitk.GetSize())
#print(t_sitk)
#print(composite_transform)
#quit()
new_image = utils.resample_1_rgb(composite_transform,
t_sitk,
mean = utils.get_mean_edges(t_sitk)
)
checkerboard = sitk.CheckerBoardImageFilter()
check_im = checkerboard.Execute(old_reg_image, new_image, (8,8))
utils.display_images(
fixed_npa=sitk.GetArrayViewFromImage(old_reg_image),
moving_npa=sitk.GetArrayViewFromImage(new_image),
checkerboard=sitk.GetArrayViewFromImage(check_im),
show=True
)
#print(new_image)
#test_im = sitk.Image([10,10], sitk.sitkVectorUInt8, 3)
#print(test_im)
#quit()
resample_im = "resample_affine_{0}.png".format(j)
resample_path = os.path.join(top_dir, resample_dir, resample_im)
print(resample_path)
writer.SetFileName(resample_path)
writer.Execute(new_image)
reg_dict[reg_key] = dict(png_name = resample_im)
resample_orig = "resample_orig_{0}.png".format(j)
resample_path = os.path.join(top_dir, resample_dir, resample_orig)
#print(resample_path)
writer.SetFileName(resample_path)
writer.Execute(t_sitk)
#old_reg_image = new_image
#print(old_reg_image.GetSize())
#if (j > 0):
img_arr = sitk.GetArrayFromImage(new_image)
img_out = sitk.GetImageFromArray(img_arr)
print(img_arr.shape)
print(img_out.GetSize())
print(dir(new_image))
img_out.CopyInformation(new_image)
        # no explicit cleanup needed: rebinding old_reg_image below lets the
        # previous image be garbage collected
old_reg_image = sitk.Image(img_out)
if __name__ == "__main__":
main()
| bsd-2-clause |
chen0031/nupic | nupic/research/monitor_mixin/plot.py | 8 | 5063 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Plot class used in monitor mixin framework.
"""
import os
import traceback
try:
# We import in here to avoid creating a matplotlib dependency in nupic.
import matplotlib.pyplot as plt
import matplotlib.cm as cm
except ImportError:
print "Cannot import matplotlib. Plot class will not work."
print traceback.format_exc() + "\n"
class Plot(object):
def __init__(self, monitor, title, show=True):
"""
@param monitor (MonitorMixinBase) Monitor Mixin instance that generated
this plot
    @param title   (string)           Plot title
    @param show    (bool)             Whether to display the figure interactively
"""
self._monitor = monitor
self._title = title
self._fig = self._initFigure()
self._show = show
if self._show:
plt.ion()
plt.show()
def _initFigure(self):
fig = plt.figure()
fig.suptitle(self._prettyPrintTitle())
return fig
def _prettyPrintTitle(self):
if self._monitor.mmName is not None:
return "[{0}] {1}".format(self._monitor.mmName, self._title)
return self._title
def addGraph(self, data, position=111, xlabel=None, ylabel=None):
""" Adds a graph to the plot's figure.
@param data See matplotlib.Axes.plot documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
"""
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.plot(data)
plt.draw()
def addHistogram(self, data, position=111, xlabel=None, ylabel=None,
bins=None):
""" Adds a histogram to the plot's figure.
@param data See matplotlib.Axes.hist documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
"""
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.hist(data, bins=bins, color="green", alpha=0.8)
plt.draw()
def add2DArray(self, data, position=111, xlabel=None, ylabel=None, cmap=None,
aspect="auto", interpolation="nearest", name=None):
""" Adds an image to the plot's figure.
@param data a 2D array. See matplotlib.Axes.imshow documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
@param cmap color map used in the rendering
@param aspect how aspect ratio is handled during resize
@param interpolation interpolation method
"""
if cmap is None:
# The default colormodel is an ugly blue-red model.
cmap = cm.Greys
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.imshow(data, cmap=cmap, aspect=aspect, interpolation=interpolation)
if self._show:
plt.draw()
if name is not None:
if not os.path.exists("log"):
os.mkdir("log")
plt.savefig("log/{name}.png".format(name=name), bbox_inches="tight",
figsize=(8, 6), dpi=400)
def _addBase(self, position, xlabel=None, ylabel=None):
""" Adds a subplot to the plot's figure at specified position.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
@returns (matplotlib.Axes) Axes instance
"""
ax = self._fig.add_subplot(position)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
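# Usage sketch (illustrative only; `monitor` is any MonitorMixinBase instance and
# `values` is a sequence of numbers):
#   plot = Plot(monitor, "cell activity")
#   plot.addGraph(values, position=211, xlabel="timestep", ylabel="count")
#   plot.addHistogram(values, position=212, bins=20)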
| agpl-3.0 |
taynaud/sparkit-learn | splearn/pipeline.py | 2 | 13217 | # -*- coding: utf-8 -*-
from functools import reduce
import numpy as np
import scipy.sparse as sp
from sklearn.externals import six
from sklearn.externals.joblib import Parallel, delayed
from sklearn.pipeline import FeatureUnion, Pipeline, _name_estimators
from splearn.rdd import ArrayRDD, DictRDD
class SparkPipeline(Pipeline):
"""Distributed implementation of sklearn's pipeline node.
Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Parameters
----------
steps: list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Examples
--------
>>> import numpy as np
>>> from sklearn.base import BaseEstimator
>>> from splearn.rdd import DictRDD
>>> from splearn.linear_model.base import SparkLinearRegression
>>> from splearn.pipeline import SparkPipeline
>>> class Noiser(BaseEstimator):
>>> def __init__(self, random_seed=42):
>>> np.random.seed(random_seed)
>>> def fit(self, Z):
>>> return self
>>> def transform(self, Z):
>>> f = lambda X: X + np.random.rand(*X.shape)
>>> if isinstance(Z, DictRDD):
>>> return Z.transform(f, column='X')
>>> else:
>>> return Z.transform(f)
>>> X = np.arange(100)[:, np.newaxis]
>>> y = np.arange(100)
>>> X_rdd = sc.parallelize(X)
>>> y_rdd = sc.parallelize(y)
>>> rdd = X_rdd.zip(y_rdd)
>>> Z = DictRDD(rdd, ('X', 'y'), 25)
>>> pipe = SparkPipeline([('noise', Noiser()),
>>> ('reg', SparkLinearRegression())])
>>> pipe.fit(Z)
SparkPipeline(steps=[
('noise', Noiser(random_seed=None)),
('reg', SparkLinearRegression(copy_X=True,
fit_intercept=True,
n_jobs=1,
normalize=False)
)])
>>> pipe.predict(Z[:, 'X']).collect()
[array([ 1.51878876, 2.50336579, 3.20260105, 4.41610508, 5.52531787]),
array([ 5.56329829, 6.54787532, 7.24711057, 8.46061461, 9.5698274 ]),
array([ 9.60780781, 10.59238484, 11.2916201, 12.50512413, 13.61433693]),
array([ 13.65231734, 14.63689437, 15.33612963, 16.54963366, 17.65884645])]
"""
def _pre_transform(self, Z, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Zp = Z.persist()
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Zt = transform.fit_transform(Zp, **fit_params_steps[name])
else:
Zt = transform.fit(Zp, **fit_params_steps[name]) \
.transform(Zp)
Zp.unpersist()
Zp = Zt.persist()
return Zp, fit_params_steps[self.steps[-1][0]]
def fit(self, Z, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
Z : ArrayRDD, TupleRDD or DictRDD
Input data in blocked distributed format.
Returns
-------
self : SparkPipeline
"""
Zt, fit_params = self._pre_transform(Z, **fit_params)
self.steps[-1][-1].fit(Zt, **fit_params)
Zt.unpersist()
return self
def fit_transform(self, Z, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator."""
Zt, fit_params = self._pre_transform(Z, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Zt, **fit_params)
else:
return self.steps[-1][-1].fit(Zt, **fit_params).transform(Zt)
def score(self, Z):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score."""
Zt = Z
for name, transform in self.steps[:-1]:
Zt = transform.transform(Zt)
return self.steps[-1][-1].score(Zt)
def get_params(self, deep=True):
if not deep:
return super(SparkPipeline, self).get_params(deep=False)
else:
out = self.named_steps.copy()
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(SparkPipeline, self).get_params(deep=False))
return out
def to_scikit(self):
scikit_steps = []
for name, step in self.steps:
if hasattr(step, 'to_scikit'):
scikit_steps.append((name, step.to_scikit()))
else:
scikit_steps.append((name, step))
return Pipeline(scikit_steps)
################################################################################
def _fit_one_transformer(transformer, Z, **fit_params):
return transformer.fit(Z, **fit_params)
def _transform_one(transformer, name, Z, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
if isinstance(Z, DictRDD):
return transformer.transform(Z).transform(
lambda x: x * transformer_weights[name], 'X')
else:
return transformer.transform(Z).map(
lambda x: x * transformer_weights[name])
return transformer.transform(Z)
def _fit_transform_one(transformer, name, Z, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
Z_transformed = transformer.fit_transform(Z, **fit_params)
else:
Z_transformed = transformer.fit(Z, **fit_params).transform(Z)
        # multiplication by weight; reassign, since the RDD transformations
        # return new objects and the weighting would otherwise be discarded
        if isinstance(Z, DictRDD):
            Z_transformed = Z_transformed.transform(
                lambda x: x * transformer_weights[name], 'X')
        else:
            Z_transformed = Z_transformed.map(
                lambda x: x * transformer_weights[name])
return Z_transformed, transformer
if hasattr(transformer, 'fit_transform'):
Z_transformed = transformer.fit_transform(Z, **fit_params)
return Z_transformed, transformer
else:
Z_transformed = transformer.fit(Z, **fit_params).transform(Z)
return Z_transformed, transformer
def flatten(l):
out = []
for item in l:
if isinstance(item, (list, tuple)):
out.extend(flatten(item))
else:
out.append(item)
return out
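# e.g. flatten([1, (2, [3, 4]), 5]) -> [1, 2, 3, 4, 5]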
class SparkFeatureUnion(FeatureUnion):
"""TODO: rewrite docstring
Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def fit(self, Z, **fit_params):
"""TODO: rewrite docstring
Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
fit_params_steps = dict((step, {})
for step, _ in self.transformer_list)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
transformers = Parallel(n_jobs=self.n_jobs, backend="threading")(
delayed(_fit_one_transformer)(trans, Z, **fit_params_steps[name])
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, Z, **fit_params):
"""TODO: rewrite docstring
Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
return self.fit(Z, **fit_params).transform(Z)
def transform(self, Z):
"""TODO: rewrite docstring
Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
if isinstance(Z, DictRDD):
X = Z[:, 'X']
else:
X = Z
Zs = [_transform_one(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list]
X_rdd = reduce(lambda x, y: x.zip(y._rdd), Zs)
X_rdd = X_rdd.map(flatten)
mapper = np.hstack
for item in X_rdd.first():
if sp.issparse(item):
mapper = sp.hstack
X_rdd = X_rdd.map(lambda x: mapper(x))
if isinstance(Z, DictRDD):
return DictRDD([X_rdd, Z[:, 'y']],
columns=Z.columns,
dtype=Z.dtype,
bsize=Z.bsize)
else:
return X_rdd
def get_params(self, deep=True):
if not deep:
return super(SparkFeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in six.iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(SparkFeatureUnion, self).get_params(deep=False))
return out
def to_scikit(self):
sk_transformer_list = []
for name, transformer in self.transformer_list:
if hasattr(transformer, 'to_scikit'):
sk_transformer_list.append((name, transformer.to_scikit()))
else:
sk_transformer_list.append((name, transformer))
return FeatureUnion(sk_transformer_list)
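# Usage sketch (illustrative only; the transformer names below are placeholders
# for any splearn transformers exposing fit/transform on ArrayRDD/DictRDD input):
#   union = SparkFeatureUnion([('f1', SomeSparkTransformer()),
#                              ('f2', AnotherSparkTransformer())],
#                             transformer_weights={'f1': 2.0})
#   Z_t = union.fit_transform(Z)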
def make_sparkunion(*transformers):
"""Construct a FeatureUnion from the given transformers.
    This is a shorthand for the SparkFeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
    >>> make_sparkunion(PCA(), TruncatedSVD())  # doctest: +NORMALIZE_WHITESPACE
    SparkFeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
    f : SparkFeatureUnion
"""
return SparkFeatureUnion(_name_estimators(transformers))
| apache-2.0 |
ianatpn/nupictest | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gdk.py | 69 | 15968 | from __future__ import division
import math
import os
import sys
import warnings
def fn_name(): return sys._getframe(1).f_code.co_name
import gobject
import gtk; gdk = gtk.gdk
import pango
pygtk_version_required = (2,2,0)
if gtk.pygtk_version < pygtk_version_required:
raise ImportError ("PyGTK %d.%d.%d is installed\n"
"PyGTK %d.%d.%d or later is required"
% (gtk.pygtk_version + pygtk_version_required))
del pygtk_version_required
import numpy as npy
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.transforms import Affine2D
from matplotlib.backends._backend_gdk import pixbuf_get_pixels_array
backend_version = "%d.%d.%d" % gtk.pygtk_version
_debug = False
# Image formats that this backend supports - for FileChooser and print_figure()
IMAGE_FORMAT = ['eps', 'jpg', 'png', 'ps', 'svg'] + ['bmp'] # , 'raw', 'rgb']
IMAGE_FORMAT.sort()
IMAGE_FORMAT_DEFAULT = 'png'
class RendererGDK(RendererBase):
fontweights = {
100 : pango.WEIGHT_ULTRALIGHT,
200 : pango.WEIGHT_LIGHT,
300 : pango.WEIGHT_LIGHT,
400 : pango.WEIGHT_NORMAL,
500 : pango.WEIGHT_NORMAL,
600 : pango.WEIGHT_BOLD,
700 : pango.WEIGHT_BOLD,
800 : pango.WEIGHT_HEAVY,
900 : pango.WEIGHT_ULTRABOLD,
'ultralight' : pango.WEIGHT_ULTRALIGHT,
'light' : pango.WEIGHT_LIGHT,
'normal' : pango.WEIGHT_NORMAL,
'medium' : pango.WEIGHT_NORMAL,
'semibold' : pango.WEIGHT_BOLD,
'bold' : pango.WEIGHT_BOLD,
'heavy' : pango.WEIGHT_HEAVY,
'ultrabold' : pango.WEIGHT_ULTRABOLD,
'black' : pango.WEIGHT_ULTRABOLD,
}
# cache for efficiency, these must be at class, not instance level
layoutd = {} # a map from text prop tups to pango layouts
rotated = {} # a map from text prop tups to rotated text pixbufs
def __init__(self, gtkDA, dpi):
# widget gtkDA is used for:
# '<widget>.create_pango_layout(s)'
        #  '<widget>.get_colormap()' (see the self._cmap assignment below)
self.gtkDA = gtkDA
self.dpi = dpi
self._cmap = gtkDA.get_colormap()
self.mathtext_parser = MathTextParser("Agg")
def set_pixmap (self, pixmap):
self.gdkDrawable = pixmap
def set_width_height (self, width, height):
"""w,h is the figure w,h not the pixmap w,h
"""
self.width, self.height = width, height
def draw_path(self, gc, path, transform, rgbFace=None):
transform = transform + Affine2D(). \
scale(1.0, -1.0).translate(0, self.height)
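        # flip the y axis: matplotlib uses a bottom-left origin while GDK
        # drawables use a top-left origin (see flipy() below)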
polygons = path.to_polygons(transform, self.width, self.height)
for polygon in polygons:
# draw_polygon won't take an arbitrary sequence -- it must be a list
# of tuples
polygon = [(int(round(x)), int(round(y))) for x, y in polygon]
if rgbFace is not None:
saveColor = gc.gdkGC.foreground
gc.gdkGC.foreground = gc.rgb_to_gdk_color(rgbFace)
self.gdkDrawable.draw_polygon(gc.gdkGC, True, polygon)
gc.gdkGC.foreground = saveColor
if gc.gdkGC.line_width > 0:
self.gdkDrawable.draw_lines(gc.gdkGC, polygon)
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
if bbox != None:
l,b,w,h = bbox.bounds
#rectangle = (int(l), self.height-int(b+h),
# int(w), int(h))
# set clip rect?
im.flipud_out()
rows, cols, image_str = im.as_rgba_str()
image_array = npy.fromstring(image_str, npy.uint8)
image_array.shape = rows, cols, 4
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,
has_alpha=True, bits_per_sample=8,
width=cols, height=rows)
array = pixbuf_get_pixels_array(pixbuf)
array[:,:,:] = image_array
gc = self.new_gc()
y = self.height-y-rows
try: # new in 2.2
# can use None instead of gc.gdkGC, if don't need clipping
self.gdkDrawable.draw_pixbuf (gc.gdkGC, pixbuf, 0, 0,
int(x), int(y), cols, rows,
gdk.RGB_DITHER_NONE, 0, 0)
except AttributeError:
# deprecated in 2.2
pixbuf.render_to_drawable(self.gdkDrawable, gc.gdkGC, 0, 0,
int(x), int(y), cols, rows,
gdk.RGB_DITHER_NONE, 0, 0)
# unflip
im.flipud_out()
def draw_text(self, gc, x, y, s, prop, angle, ismath):
x, y = int(x), int(y)
if x <0 or y <0: # window has shrunk and text is off the edge
return
if angle not in (0,90):
warnings.warn('backend_gdk: unable to draw text at angles ' +
'other than 0 or 90')
elif ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
elif angle==90:
self._draw_rotated_text(gc, x, y, s, prop, angle)
else:
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
self.gdkDrawable.draw_layout(gc.gdkGC, x, y-h-b, layout)
def _draw_mathtext(self, gc, x, y, s, prop, angle):
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
if angle==90:
width, height = height, width
x -= width
y -= height
imw = font_image.get_width()
imh = font_image.get_height()
N = imw * imh
# a numpixels by num fonts array
Xall = npy.zeros((N,1), npy.uint8)
image_str = font_image.as_str()
Xall[:,0] = npy.fromstring(image_str, npy.uint8)
# get the max alpha at each pixel
Xs = npy.amax(Xall,axis=1)
# convert it to it's proper shape
Xs.shape = imh, imw
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, has_alpha=True,
bits_per_sample=8, width=imw, height=imh)
array = pixbuf_get_pixels_array(pixbuf)
rgb = gc.get_rgb()
array[:,:,0]=int(rgb[0]*255)
array[:,:,1]=int(rgb[1]*255)
array[:,:,2]=int(rgb[2]*255)
array[:,:,3]=Xs
try: # new in 2.2
# can use None instead of gc.gdkGC, if don't need clipping
self.gdkDrawable.draw_pixbuf (gc.gdkGC, pixbuf, 0, 0,
int(x), int(y), imw, imh,
gdk.RGB_DITHER_NONE, 0, 0)
except AttributeError:
# deprecated in 2.2
pixbuf.render_to_drawable(self.gdkDrawable, gc.gdkGC, 0, 0,
int(x), int(y), imw, imh,
gdk.RGB_DITHER_NONE, 0, 0)
def _draw_rotated_text(self, gc, x, y, s, prop, angle):
"""
Draw the text rotated 90 degrees, other angles are not supported
"""
# this function (and its called functions) is a bottleneck
# Pango 1.6 supports rotated text, but pygtk 2.4.0 does not yet have
# wrapper functions
# GTK+ 2.6 pixbufs support rotation
gdrawable = self.gdkDrawable
ggc = gc.gdkGC
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
x = int(x-h)
y = int(y-w)
if x < 0 or y < 0: # window has shrunk and text is off the edge
return
key = (x,y,s,angle,hash(prop))
imageVert = self.rotated.get(key)
if imageVert != None:
gdrawable.draw_image(ggc, imageVert, 0, 0, x, y, h, w)
return
imageBack = gdrawable.get_image(x, y, w, h)
imageVert = gdrawable.get_image(x, y, h, w)
imageFlip = gtk.gdk.Image(type=gdk.IMAGE_FASTEST,
visual=gdrawable.get_visual(),
width=w, height=h)
if imageFlip == None or imageBack == None or imageVert == None:
warnings.warn("Could not renderer vertical text")
return
imageFlip.set_colormap(self._cmap)
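        # rotate 90 degrees by copying pixels: destination (i, j) reads the
        # source pixel at (j, w-i-1)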
for i in range(w):
for j in range(h):
imageFlip.put_pixel(i, j, imageVert.get_pixel(j,w-i-1) )
gdrawable.draw_image(ggc, imageFlip, 0, 0, x, y, w, h)
gdrawable.draw_layout(ggc, x, y-b, layout)
imageIn = gdrawable.get_image(x, y, w, h)
for i in range(w):
for j in range(h):
imageVert.put_pixel(j, i, imageIn.get_pixel(w-i-1,j) )
gdrawable.draw_image(ggc, imageBack, 0, 0, x, y, w, h)
gdrawable.draw_image(ggc, imageVert, 0, 0, x, y, h, w)
self.rotated[key] = imageVert
def _get_pango_layout(self, s, prop):
"""
Create a pango layout instance for Text 's' with properties 'prop'.
Return - pango layout (from cache if already exists)
Note that pango assumes a logical DPI of 96
Ref: pango/fonts.c/pango_font_description_set_size() manual page
"""
# problem? - cache gets bigger and bigger, is never cleared out
# two (not one) layouts are created for every text item s (then they
# are cached) - why?
key = self.dpi, s, hash(prop)
value = self.layoutd.get(key)
if value != None:
return value
size = prop.get_size_in_points() * self.dpi / 96.0
size = round(size)
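        # e.g. a 12 pt font with self.dpi == 72 is requested from pango at
        # round(12 * 72 / 96.0) == 9, since pango itself assumes 96 dpi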
font_str = '%s, %s %i' % (prop.get_name(), prop.get_style(), size,)
font = pango.FontDescription(font_str)
# later - add fontweight to font_str
font.set_weight(self.fontweights[prop.get_weight()])
layout = self.gtkDA.create_pango_layout(s)
layout.set_font_description(font)
inkRect, logicalRect = layout.get_pixel_extents()
self.layoutd[key] = layout, inkRect, logicalRect
return layout, inkRect, logicalRect
def flipy(self):
return True
def get_canvas_width_height(self):
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
if ismath:
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
return w, h+1, h + 1
def new_gc(self):
return GraphicsContextGDK(renderer=self)
def points_to_pixels(self, points):
return points/72.0 * self.dpi
class GraphicsContextGDK(GraphicsContextBase):
# a cache shared by all class instances
_cached = {} # map: rgb color -> gdk.Color
_joind = {
'bevel' : gdk.JOIN_BEVEL,
'miter' : gdk.JOIN_MITER,
'round' : gdk.JOIN_ROUND,
}
_capd = {
'butt' : gdk.CAP_BUTT,
'projecting' : gdk.CAP_PROJECTING,
'round' : gdk.CAP_ROUND,
}
def __init__(self, renderer):
GraphicsContextBase.__init__(self)
self.renderer = renderer
self.gdkGC = gtk.gdk.GC(renderer.gdkDrawable)
self._cmap = renderer._cmap
def rgb_to_gdk_color(self, rgb):
"""
rgb - an RGB tuple (three 0.0-1.0 values)
return an allocated gtk.gdk.Color
"""
try:
return self._cached[tuple(rgb)]
except KeyError:
color = self._cached[tuple(rgb)] = \
self._cmap.alloc_color(
int(rgb[0]*65535),int(rgb[1]*65535),int(rgb[2]*65535))
return color
#def set_antialiased(self, b):
# anti-aliasing is not supported by GDK
def set_capstyle(self, cs):
GraphicsContextBase.set_capstyle(self, cs)
self.gdkGC.cap_style = self._capd[self._capstyle]
def set_clip_rectangle(self, rectangle):
GraphicsContextBase.set_clip_rectangle(self, rectangle)
if rectangle is None:
return
l,b,w,h = rectangle.bounds
rectangle = (int(l), self.renderer.height-int(b+h)+1,
int(w), int(h))
#rectangle = (int(l), self.renderer.height-int(b+h),
# int(w+1), int(h+2))
self.gdkGC.set_clip_rectangle(rectangle)
def set_dashes(self, dash_offset, dash_list):
GraphicsContextBase.set_dashes(self, dash_offset, dash_list)
if dash_list == None:
self.gdkGC.line_style = gdk.LINE_SOLID
else:
pixels = self.renderer.points_to_pixels(npy.asarray(dash_list))
dl = [max(1, int(round(val))) for val in pixels]
self.gdkGC.set_dashes(dash_offset, dl)
self.gdkGC.line_style = gdk.LINE_ON_OFF_DASH
def set_foreground(self, fg, isRGB=False):
GraphicsContextBase.set_foreground(self, fg, isRGB)
self.gdkGC.foreground = self.rgb_to_gdk_color(self.get_rgb())
def set_graylevel(self, frac):
GraphicsContextBase.set_graylevel(self, frac)
self.gdkGC.foreground = self.rgb_to_gdk_color(self.get_rgb())
def set_joinstyle(self, js):
GraphicsContextBase.set_joinstyle(self, js)
self.gdkGC.join_style = self._joind[self._joinstyle]
def set_linewidth(self, w):
GraphicsContextBase.set_linewidth(self, w)
if w == 0:
self.gdkGC.line_width = 0
else:
pixels = self.renderer.points_to_pixels(w)
self.gdkGC.line_width = max(1, int(round(pixels)))
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGDK(thisFig)
manager = FigureManagerBase(canvas, num)
# equals:
#manager = FigureManagerBase (FigureCanvasGDK (Figure(*args, **kwargs),
# num)
return manager
class FigureCanvasGDK (FigureCanvasBase):
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
self._renderer_init()
def _renderer_init(self):
self._renderer = RendererGDK (gtk.DrawingArea(), self.figure.dpi)
def _render_figure(self, pixmap, width, height):
self._renderer.set_pixmap (pixmap)
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['jpg'] = 'JPEG'
filetypes['jpeg'] = 'JPEG'
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, 'jpeg')
print_jpg = print_jpeg
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, 'png')
def _print_image(self, filename, format, *args, **kwargs):
width, height = self.get_width_height()
pixmap = gtk.gdk.Pixmap (None, width, height, depth=24)
self._render_figure(pixmap, width, height)
# jpg colors don't match the display very well, png colors match
# better
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, 0, 8,
width, height)
pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
0, 0, 0, 0, width, height)
pixbuf.save(filename, format)
def get_default_filetype(self):
return 'png'
| gpl-3.0 |
dbouquin/AstroHackWeek2015 | day3-machine-learning/solutions/forests.py | 14 | 2162 | from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import load_digits
from sklearn.learning_curve import validation_curve
import numpy as np
import matplotlib.pyplot as plt
digits = load_digits()
def plot_validation_curve(parameter_values, train_scores, validation_scores):
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
validation_scores_mean = np.mean(validation_scores, axis=1)
validation_scores_std = np.std(validation_scores, axis=1)
plt.fill_between(parameter_values, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(parameter_values, validation_scores_mean - validation_scores_std,
validation_scores_mean + validation_scores_std, alpha=0.1, color="g")
plt.plot(parameter_values, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(parameter_values, validation_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.ylim(validation_scores_mean.min() - .1, train_scores_mean.max() + .1)
plt.legend(loc="best")
param_range = range(1, 50)
training_scores, validation_scores = validation_curve(DecisionTreeClassifier(), digits.data, digits.target,
param_name="max_depth",
param_range=param_range,
cv=5)
plt.figure()
plot_validation_curve(param_range, training_scores, validation_scores)
param_range = range(1, 20, 1)
training_scores, validation_scores = validation_curve(RandomForestClassifier(n_estimators=100),
digits.data, digits.target,
param_name="max_features",
param_range=param_range,
cv=5)
plt.figure()
plot_validation_curve(param_range, training_scores, validation_scores)
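# show both validation-curve figures when the script is run directly
plt.show()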
| gpl-2.0 |
chatcannon/numpy | numpy/core/fromnumeric.py | 15 | 98980 | """Module containing non-deprecated functions borrowed from Numeric.
"""
from __future__ import division, absolute_import, print_function
import types
import warnings
import numpy as np
from .. import VisibleDeprecationWarning
from . import multiarray as mu
from . import umath as um
from . import numerictypes as nt
from .numeric import asarray, array, asanyarray, concatenate
from . import _methods
_dt_ = nt.sctype2char
# functions that are methods
__all__ = [
'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_',
'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
]
try:
_gentype = types.GeneratorType
except AttributeError:
_gentype = type(None)
# save away Python sum
_sum_ = sum
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj), method)(*args, **kwds)
if wrap:
if not isinstance(result, mu.ndarray):
result = asarray(result)
result = wrap(result)
return result
def _wrapfunc(obj, method, *args, **kwds):
try:
return getattr(obj, method)(*args, **kwds)
# An AttributeError occurs if the object does not have
# such a method in its class.
# A TypeError occurs if the object does have such a method
# in its class, but its signature is not identical to that
# of NumPy's. This situation has occurred in the case of
# a downstream library like 'pandas'.
except (AttributeError, TypeError):
return _wrapit(obj, method, *args, **kwds)
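# e.g. np.take([4, 3, 5], [0, 2]) reaches _wrapit because a plain list has no
# 'take' method (AttributeError), while objects whose 'take' signature differs
# from ndarray's (such as pandas objects) fall back via the TypeError branch.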
def take(a, indices, axis=None, out=None, mode='raise'):
"""
Take elements from an array along an axis.
This function does the same thing as "fancy" indexing (indexing arrays
using arrays); however, it can be easier to use if you need elements
along a given axis.
Parameters
----------
a : array_like
The source array.
indices : array_like
The indices of the values to extract.
.. versionadded:: 1.8.0
Also allow scalars for indices.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : ndarray, optional
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
subarray : ndarray
The returned array has the same type as `a`.
See Also
--------
compress : Take elements using a boolean mask
ndarray.take : equivalent method
Examples
--------
>>> a = [4, 3, 5, 7, 6, 8]
>>> indices = [0, 1, 4]
>>> np.take(a, indices)
array([4, 3, 6])
In this example if `a` is an ndarray, "fancy" indexing can be used.
>>> a = np.array(a)
>>> a[indices]
array([4, 3, 6])
If `indices` is not one dimensional, the output also has these dimensions.
>>> np.take(a, [[0, 1], [2, 3]])
array([[4, 3],
[5, 7]])
"""
return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
# not deprecated --- copy if necessary, view otherwise
def reshape(a, newshape, order='C'):
"""
Gives a new shape to an array without changing its data.
Parameters
----------
a : array_like
Array to be reshaped.
newshape : int or tuple of ints
The new shape should be compatible with the original shape. If
an integer, then the result will be a 1-D array of that length.
One shape dimension can be -1. In this case, the value is
inferred from the length of the array and remaining dimensions.
order : {'C', 'F', 'A'}, optional
Read the elements of `a` using this index order, and place the
elements into the reshaped array using this index order. 'C'
means to read / write the elements using C-like index order,
with the last axis index changing fastest, back to the first
axis index changing slowest. 'F' means to read / write the
elements using Fortran-like index order, with the first index
changing fastest, and the last index changing slowest. Note that
the 'C' and 'F' options take no account of the memory layout of
the underlying array, and only refer to the order of indexing.
'A' means to read / write the elements in Fortran-like index
order if `a` is Fortran *contiguous* in memory, C-like order
otherwise.
Returns
-------
reshaped_array : ndarray
This will be a new view object if possible; otherwise, it will
be a copy. Note there is no guarantee of the *memory layout* (C- or
Fortran- contiguous) of the returned array.
See Also
--------
ndarray.reshape : Equivalent method.
Notes
-----
It is not always possible to change the shape of an array without
copying the data. If you want an error to be raise if the data is copied,
you should assign the new shape to the shape attribute of the array::
>>> a = np.zeros((10, 2))
# A transpose make the array non-contiguous
>>> b = a.T
# Taking a view makes it possible to modify the shape without modifying
# the initial object.
>>> c = b.view()
>>> c.shape = (20)
AttributeError: incompatible shape for a non-contiguous array
The `order` keyword gives the index ordering both for *fetching* the values
from `a`, and then *placing* the values into the output array.
For example, let's say you have an array:
>>> a = np.arange(6).reshape((3, 2))
>>> a
array([[0, 1],
[2, 3],
[4, 5]])
You can think of reshaping as first raveling the array (using the given
index order), then inserting the elements from the raveled array into the
new array using the same kind of index ordering as was used for the
raveling.
>>> np.reshape(a, (2, 3)) # C-like index ordering
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering
array([[0, 4, 3],
[2, 1, 5]])
>>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F')
array([[0, 4, 3],
[2, 1, 5]])
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.reshape(a, 6)
array([1, 2, 3, 4, 5, 6])
>>> np.reshape(a, 6, order='F')
array([1, 4, 2, 5, 3, 6])
>>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2
array([[1, 2],
[3, 4],
[5, 6]])
"""
return _wrapfunc(a, 'reshape', newshape, order=order)
def choose(a, choices, out=None, mode='raise'):
"""
Construct an array from an index array and a set of arrays to choose from.
First of all, if confused or uncertain, definitely look at the Examples -
in its full generality, this function is less simple than it might
seem from the following code description (below ndi =
`numpy.lib.index_tricks`):
``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``.
But this omits some subtleties. Here is a fully general summary:
Given an "index" array (`a`) of integers and a sequence of `n` arrays
(`choices`), `a` and each choice array are first broadcast, as necessary,
to arrays of a common shape; calling these *Ba* and *Bchoices[i], i =
0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape``
for each `i`. Then, a new array with shape ``Ba.shape`` is created as
follows:
* if ``mode=raise`` (the default), then, first of all, each element of
`a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that
`i` (in that range) is the value at the `(j0, j1, ..., jm)` position
in `Ba` - then the value at the same position in the new array is the
value in `Bchoices[i]` at that same position;
* if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed)
integer; modular arithmetic is used to map integers outside the range
`[0, n-1]` back into that range; and then the new array is constructed
as above;
* if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed)
integer; negative integers are mapped to 0; values greater than `n-1`
are mapped to `n-1`; and then the new array is constructed as above.
Parameters
----------
a : int array
This array must contain integers in `[0, n-1]`, where `n` is the number
of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any
integers are permissible.
choices : sequence of arrays
Choice arrays. `a` and all of the choices must be broadcastable to the
same shape. If `choices` is itself an array (not recommended), then
its outermost dimension (i.e., the one corresponding to
``choices.shape[0]``) is taken as defining the "sequence".
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
mode : {'raise' (default), 'wrap', 'clip'}, optional
Specifies how indices outside `[0, n-1]` will be treated:
* 'raise' : an exception is raised
* 'wrap' : value becomes value mod `n`
* 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1
Returns
-------
merged_array : array
The merged result.
Raises
------
ValueError: shape mismatch
If `a` and each choice array are not all broadcastable to the same
shape.
See Also
--------
ndarray.choose : equivalent method
Notes
-----
To reduce the chance of misinterpretation, even though the following
"abuse" is nominally supported, `choices` should neither be, nor be
thought of as, a single array, i.e., the outermost sequence-like container
should be either a list or a tuple.
Examples
--------
>>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
... [20, 21, 22, 23], [30, 31, 32, 33]]
>>> np.choose([2, 3, 1, 0], choices
... # the first element of the result will be the first element of the
... # third (2+1) "array" in choices, namely, 20; the second element
... # will be the second element of the fourth (3+1) choice array, i.e.,
... # 31, etc.
... )
array([20, 31, 12, 3])
>>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
array([20, 31, 12, 3])
>>> # because there are 4 choice arrays
>>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
array([20, 1, 12, 3])
>>> # i.e., 0
A couple examples illustrating how choose broadcasts:
>>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
>>> choices = [-10, 10]
>>> np.choose(a, choices)
array([[ 10, -10, 10],
[-10, 10, -10],
[ 10, -10, 10]])
>>> # With thanks to Anne Archibald
>>> a = np.array([0, 1]).reshape((2,1,1))
>>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
>>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
>>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
array([[[ 1, 1, 1, 1, 1],
[ 2, 2, 2, 2, 2],
[ 3, 3, 3, 3, 3]],
[[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5]]])
"""
return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
def repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
Parameters
----------
a : array_like
Input array.
repeats : int or array of ints
The number of repetitions for each element. `repeats` is broadcasted
to fit the shape of the given axis.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns
-------
repeated_array : ndarray
Output array which has the same shape as `a`, except along
the given axis.
See Also
--------
tile : Tile an array.
Examples
--------
>>> np.repeat(3, 4)
array([3, 3, 3, 3])
>>> x = np.array([[1,2],[3,4]])
>>> np.repeat(x, 2)
array([1, 1, 2, 2, 3, 3, 4, 4])
>>> np.repeat(x, 3, axis=1)
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> np.repeat(x, [1, 2], axis=0)
array([[1, 2],
[3, 4],
[3, 4]])
"""
return _wrapfunc(a, 'repeat', repeats, axis=axis)
def put(a, ind, v, mode='raise'):
"""
Replaces specified elements of an array with given values.
The indexing works on the flattened target array. `put` is roughly
equivalent to:
::
a.flat[ind] = v
Parameters
----------
a : ndarray
Target array.
ind : array_like
Target indices, interpreted as integers.
v : array_like
Values to place in `a` at target indices. If `v` is shorter than
`ind` it will be repeated as necessary.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
See Also
--------
putmask, place
Examples
--------
>>> a = np.arange(5)
>>> np.put(a, [0, 2], [-44, -55])
>>> a
array([-44, 1, -55, 3, 4])
>>> a = np.arange(5)
>>> np.put(a, 22, -5, mode='clip')
>>> a
array([ 0, 1, 2, 3, -5])
"""
try:
put = a.put
except AttributeError:
raise TypeError("argument 1 must be numpy.ndarray, "
"not {name}".format(name=type(a).__name__))
return put(ind, v, mode=mode)
def swapaxes(a, axis1, axis2):
"""
Interchange two axes of an array.
Parameters
----------
a : array_like
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is
returned; otherwise a new array is created. For earlier NumPy
versions a view of `a` is returned only if the order of the
axes is changed, otherwise the input array is returned.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> np.swapaxes(x,0,1)
array([[1],
[2],
[3]])
>>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.swapaxes(x,0,2)
array([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
"""
return _wrapfunc(a, 'swapaxes', axis1, axis2)
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
Parameters
----------
a : array_like
Input array.
axes : list of ints, optional
By default, reverse the dimensions, otherwise permute the axes
according to the values given.
Returns
-------
p : ndarray
`a` with its axes permuted. A view is returned whenever
possible.
See Also
--------
moveaxis
argsort
Notes
-----
Use `transpose(a, argsort(axes))` to invert the transposition of tensors
when using the `axes` keyword argument.
Transposing a 1-D array returns an unchanged view of the original array.
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.transpose(x)
array([[0, 2],
[1, 3]])
>>> x = np.ones((1, 2, 3))
>>> np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
return _wrapfunc(a, 'transpose', axes)
def partition(a, kth, axis=-1, kind='introselect', order=None):
"""
Return a partitioned copy of an array.
Creates a copy of the array with its elements rearranged in such a
way that the value of the element in k-th position is in the
position it would be in a sorted array. All elements smaller than
the k-th element are moved before this element and all equal or
greater are moved behind it. The ordering of the elements in the two
partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to be sorted.
kth : int or sequence of ints
Element index to partition by. The k-th value of the element
will be in its final sorted position and all smaller elements
will be moved before it and all equal or greater elements behind
it. The order all elements in the partitions is undefined. If
provided with a sequence of k-th it will partition all elements
indexed by k-th of them into their sorted position at once.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument
specifies which fields to compare first, second, etc. A single
field can be specified as a string. Not all fields need be
specified, but unspecified fields will still be used, in the
order in which they come up in the dtype, to break ties.
Returns
-------
partitioned_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.partition : Method to sort an array in-place.
argpartition : Indirect partition.
sort : Full sorting
Notes
-----
The various selection algorithms are characterized by their average
speed, worst case performance, work space size, and whether they are
stable. A stable sort keeps items with the same key in the same
relative order. The available algorithms have the following
properties:
================= ======= ============= ============ =======
kind speed worst case work space stable
================= ======= ============= ============ =======
'introselect' 1 O(n) 0 no
================= ======= ============= ============ =======
All the partition algorithms make temporary copies of the data when
partitioning along any but the last axis. Consequently,
partitioning along the last axis is faster and uses less space than
partitioning along any other axis.
The sort order for complex numbers is lexicographic. If both the
real and imaginary parts are non-nan then the order is determined by
the real parts except when they are equal, in which case the order
is determined by the imaginary parts.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> np.partition(a, 3)
array([2, 1, 3, 4])
>>> np.partition(a, (1, 3))
array([1, 2, 3, 4])
"""
if axis is None:
a = asanyarray(a).flatten()
axis = 0
else:
a = asanyarray(a).copy(order="K")
a.partition(kth, axis=axis, kind=kind, order=order)
return a
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
"""
Perform an indirect partition along the given axis using the
algorithm specified by the `kind` keyword. It returns an array of
indices of the same shape as `a` that index data along the given
axis in partitioned order.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to sort.
kth : int or sequence of ints
Element index to partition by. The k-th element will be in its
final sorted position and all smaller elements will be moved
before it and all larger elements behind it. The order all
elements in the partitions is undefined. If provided with a
sequence of k-th it will partition all of them into their sorted
position at once.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If
None, the flattened array is used.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'
order : str or list of str, optional
When `a` is an array with fields defined, this argument
specifies which fields to compare first, second, etc. A single
field can be specified as a string, and not all fields need be
specified, but unspecified fields will still be used, in the
order in which they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that partition `a` along the specified axis.
In other words, ``a[index_array]`` yields a partitioned `a`.
See Also
--------
partition : Describes partition algorithms used.
ndarray.partition : Inplace partition.
argsort : Full indirect sort
Notes
-----
See `partition` for notes on the different selection algorithms.
Examples
--------
One dimensional array:
>>> x = np.array([3, 4, 2, 1])
>>> x[np.argpartition(x, 3)]
array([2, 1, 3, 4])
>>> x[np.argpartition(x, (1, 3))]
array([1, 2, 3, 4])
>>> x = [3, 4, 2, 1]
>>> np.array(x)[np.argpartition(x, 3)]
array([2, 1, 3, 4])
"""
return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
def sort(a, axis=-1, kind='quicksort', order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : array_like
Array to be sorted.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
partition : Partial sort.
Notes
-----
The various sorting algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative
order. The three available algorithms have the following
properties:
=========== ======= ============= ============ =======
kind speed worst case work space stable
=========== ======= ============= ============ =======
'quicksort' 1 O(n^2) 0 no
'mergesort' 2 O(n*log(n)) ~n/2 yes
'heapsort' 3 O(n*log(n)) 0 no
=========== ======= ============= ============ =======
All the sort algorithms make temporary copies of the data when
sorting along any but the last axis. Consequently, sorting along
the last axis is faster and uses less space than sorting along
any other axis.
The sort order for complex numbers is lexicographic. If both the real
and imaginary parts are non-nan then the order is determined by the
real parts except when they are equal, in which case the order is
determined by the imaginary parts.
Previous to numpy 1.4.0 sorting real and complex arrays containing nan
values led to undefined behaviour. In numpy versions >= 1.4.0 nan
values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
where R is a non-nan real value. Complex values with the same nan
placements are sorted according to the non-nan part if it exists.
Non-nan values are sorted as before.
.. versionadded:: 1.12.0
quicksort has been changed to an introsort which will switch
heapsort when it does not make enough progress. This makes its
worst case O(n*log(n)).
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
>>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
... ('Galahad', 1.7, 38)]
>>> a = np.array(values, dtype=dtype) # create a structured array
>>> np.sort(a, order='height') # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.8999999999999999, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
Sort by age, then height if ages are equal:
>>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
('Arthur', 1.8, 41)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
"""
if axis is None:
a = asanyarray(a).flatten()
axis = 0
else:
a = asanyarray(a).copy(order="K")
a.sort(axis=axis, kind=kind, order=order)
return a
def argsort(a, axis=-1, kind='quicksort', order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> np.argsort(x, axis=0)
array([[0, 1],
[1, 0]])
>>> np.argsort(x, axis=1)
array([[0, 1],
[0, 1]])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1])
"""
return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
def argmax(a, axis=None, out=None):
"""
Returns the indices of the maximum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argmax(a)
5
>>> np.argmax(a, axis=0)
array([1, 1, 1])
>>> np.argmax(a, axis=1)
array([2, 2])
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0, 5, 2, 3, 4, 5])
>>> np.argmax(b) # Only the first occurrence is returned.
1
"""
return _wrapfunc(a, 'argmax', axis=axis, out=out)
def argmin(a, axis=None, out=None):
"""
Returns the indices of the minimum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
        Array of indices into the array. It has the same shape as `a`,
        with the dimension along `axis` removed.
See Also
--------
ndarray.argmin, argmax
amin : The minimum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
Notes
-----
In case of multiple occurrences of the minimum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argmin(a)
0
>>> np.argmin(a, axis=0)
array([0, 0, 0])
>>> np.argmin(a, axis=1)
array([0, 0])
>>> b = np.arange(6)
>>> b[4] = 0
>>> b
array([0, 1, 2, 3, 0, 5])
>>> np.argmin(b) # Only the first occurrence is returned.
0
"""
return _wrapfunc(a, 'argmin', axis=axis, out=out)
def searchsorted(a, v, side='left', sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `a` such that, if the
corresponding elements in `v` were inserted before the indices, the
order of `a` would be preserved.
Parameters
----------
a : 1-D array_like
Input array. If `sorter` is None, then it must be sorted in
ascending order, otherwise `sorter` must be an array of indices
that sort it.
v : array_like
Values to insert into `a`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
.. versionadded:: 1.7.0
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
sort : Return a sorted copy of an array.
histogram : Produce histogram from 1-D data.
Notes
-----
Binary search is used to find the required insertion points.
As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
`nan` values. The enhanced sort order is documented in `sort`.
Examples
--------
>>> np.searchsorted([1,2,3,4,5], 3)
2
>>> np.searchsorted([1,2,3,4,5], 3, side='right')
3
>>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
array([0, 5, 1, 2])
"""
return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
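# Illustrative sketch (not part of NumPy itself): when the data are not already
# sorted, the ``sorter`` argument lets ``np.searchsorted`` work with the order
# produced by ``argsort``. The helper name ``_demo_searchsorted_sorter`` is
# hypothetical.
def _demo_searchsorted_sorter():
    import numpy as np
    a = np.array([40, 10, 30, 20])
    order = np.argsort(a)                       # indices that sort `a`
    # Insertion points are expressed relative to the sorted view of `a`.
    idx = np.searchsorted(a, [15, 35], sorter=order)
    return idx                                  # array([1, 3])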
def resize(a, new_shape):
"""
Return a new array with the specified shape.
If the new array is larger than the original array, then the new
array is filled with repeated copies of `a`. Note that this behavior
is different from a.resize(new_shape) which fills with zeros instead
of repeated copies of `a`.
Parameters
----------
a : array_like
Array to be resized.
new_shape : int or tuple of int
Shape of resized array.
Returns
-------
reshaped_array : ndarray
The new array is formed from the data in the old array, repeated
if necessary to fill out the required number of elements. The
data are repeated in the order that they are stored in memory.
See Also
--------
ndarray.resize : resize an array in-place.
Examples
--------
>>> a=np.array([[0,1],[2,3]])
>>> np.resize(a,(2,3))
array([[0, 1, 2],
[3, 0, 1]])
>>> np.resize(a,(1,4))
array([[0, 1, 2, 3]])
>>> np.resize(a,(2,4))
array([[0, 1, 2, 3],
[0, 1, 2, 3]])
"""
if isinstance(new_shape, (int, nt.integer)):
new_shape = (new_shape,)
a = ravel(a)
Na = len(a)
if not Na:
return mu.zeros(new_shape, a.dtype)
total_size = um.multiply.reduce(new_shape)
n_copies = int(total_size / Na)
extra = total_size % Na
if total_size == 0:
return a[:0]
if extra != 0:
n_copies = n_copies+1
extra = Na-extra
a = concatenate((a,)*n_copies)
if extra > 0:
a = a[:-extra]
return reshape(a, new_shape)
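# Illustrative sketch (not part of NumPy itself): the function ``np.resize``
# repeats the data, while the in-place ``ndarray.resize`` pads with zeros, as
# the docstring above notes. The helper name ``_demo_resize`` is hypothetical.
def _demo_resize():
    import numpy as np
    a = np.array([[0, 1], [2, 3]])
    repeated = np.resize(a, (2, 3))    # array([[0, 1, 2], [3, 0, 1]])
    b = np.array([[0, 1], [2, 3]])
    b.resize((2, 3), refcheck=False)   # in-place: pads with zeros instead
    return repeated, b                 # b == array([[0, 1, 2], [3, 0, 0]])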
def squeeze(a, axis=None):
"""
Remove single-dimensional entries from the shape of an array.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
.. versionadded:: 1.7.0
Selects a subset of the single-dimensional entries in the
shape. If an axis is selected with shape entry greater than
one, an error is raised.
Returns
-------
squeezed : ndarray
The input array, but with all or a subset of the
dimensions of length 1 removed. This is always `a` itself
or a view into `a`.
Raises
------
ValueError
If `axis` is not `None`, and an axis being squeezed is not of length 1
See Also
--------
expand_dims : The inverse operation, adding singleton dimensions
reshape : Insert, remove, and combine dimensions, and resize existing ones
Examples
--------
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> np.squeeze(x).shape
(3,)
>>> np.squeeze(x, axis=0).shape
(3, 1)
>>> np.squeeze(x, axis=1).shape
Traceback (most recent call last):
...
ValueError: cannot select an axis to squeeze out which has size not equal to one
>>> np.squeeze(x, axis=2).shape
(1, 3)
"""
try:
squeeze = a.squeeze
except AttributeError:
return _wrapit(a, 'squeeze')
try:
# First try to use the new axis= parameter
return squeeze(axis=axis)
except TypeError:
# For backwards compatibility
return squeeze()
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
Return specified diagonals.
If `a` is 2-D, returns the diagonal of `a` with the given offset,
i.e., the collection of elements of the form ``a[i, i+offset]``. If
`a` has more than two dimensions, then the axes specified by `axis1`
and `axis2` are used to determine the 2-D sub-array whose diagonal is
returned. The shape of the resulting array can be determined by
removing `axis1` and `axis2` and appending an index to the right equal
to the size of the resulting diagonals.
In versions of NumPy prior to 1.7, this function always returned a new,
independent array containing a copy of the values in the diagonal.
In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal,
but depending on this fact is deprecated. Writing to the resulting
array continues to work as it used to, but a FutureWarning is issued.
Starting in NumPy 1.9 it returns a read-only view on the original array.
Attempting to write to the resulting array will produce an error.
In some future release, it will return a read/write view and writing to
the returned array will alter your original array. The returned array
will have the same type as the input array.
If you don't write to the array returned by this function, then you can
just ignore all of the above.
If you depend on the current behavior, then we suggest copying the
returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead
of just ``np.diagonal(a)``. This will work with both past and future
versions of NumPy.
Parameters
----------
a : array_like
Array from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be positive or
negative. Defaults to main diagonal (0).
axis1 : int, optional
Axis to be used as the first axis of the 2-D sub-arrays from which
the diagonals should be taken. Defaults to first axis (0).
axis2 : int, optional
Axis to be used as the second axis of the 2-D sub-arrays from
which the diagonals should be taken. Defaults to second axis (1).
Returns
-------
array_of_diagonals : ndarray
If `a` is 2-D and not a matrix, a 1-D array of the same type as `a`
containing the diagonal is returned. If `a` is a matrix, a 1-D
array containing the diagonal is returned in order to maintain
backward compatibility. If the dimension of `a` is greater than
two, then an array of diagonals is returned, "packed" from
left-most dimension to right-most (e.g., if `a` is 3-D, then the
diagonals are "packed" along rows).
Raises
------
ValueError
If the dimension of `a` is less than 2.
See Also
--------
diag : MATLAB work-a-like for 1-D and 2-D arrays.
diagflat : Create diagonal arrays.
trace : Sum along diagonals.
Examples
--------
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> a.diagonal()
array([0, 3])
>>> a.diagonal(1)
array([1])
A 3-D example:
>>> a = np.arange(8).reshape(2,2,2); a
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> a.diagonal(0, # Main diagonals of two arrays created by skipping
... 0, # across the outer(left)-most axis last and
... 1) # the "middle" (row) axis first.
array([[0, 6],
[1, 7]])
The sub-arrays whose main diagonals we just obtained; note that each
corresponds to fixing the right-most (column) axis, and that the
diagonals are "packed" in rows.
>>> a[:,:,0] # main diagonal is [0 6]
array([[0, 2],
[4, 6]])
>>> a[:,:,1] # main diagonal is [1 7]
array([[1, 3],
[5, 7]])
"""
if isinstance(a, np.matrix):
# Make diagonal of matrix 1-D to preserve backward compatibility.
return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
else:
return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
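# Illustrative sketch (not part of NumPy itself): because ``np.diagonal`` has
# moved toward returning a read-only view, copy the result before writing to
# it, as recommended in the docstring above. The helper name
# ``_demo_diagonal_copy`` is hypothetical.
def _demo_diagonal_copy():
    import numpy as np
    a = np.arange(4).reshape(2, 2)
    d = np.diagonal(a).copy()   # writable and independent of `a`
    d[0] = 99                   # safe across past and future NumPy versions
    return a, d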
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : array_like
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
dtype : dtype, optional
Determines the data-type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and `a` is
of integer type of precision less than the default integer
precision, then the default integer precision is used. Otherwise,
the precision is the same as that of `a`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and
it must be of the right shape to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
See Also
--------
diag, diagonal, diagflat
Examples
--------
>>> np.trace(np.eye(3))
3.0
>>> a = np.arange(8).reshape((2,2,2))
>>> np.trace(a)
array([6, 8])
>>> a = np.arange(24).reshape((2,2,2,3))
>>> np.trace(a).shape
(2, 3)
"""
if isinstance(a, np.matrix):
# Get trace of matrix via an array to preserve backward compatibility.
return asarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
else:
return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
def ravel(a, order='C'):
"""Return a contiguous flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
As of NumPy 1.10, the returned array will have the same type as the input
array. (for example, a masked array will be returned for a masked array
input)
Parameters
----------
a : array_like
Input array. The elements in `a` are read in the order specified by
`order`, and packed as a 1-D array.
order : {'C','F', 'A', 'K'}, optional
The elements of `a` are read using this index order. 'C' means
to index the elements in row-major, C-style order,
with the last axis index changing fastest, back to the first
axis index changing slowest. 'F' means to index the elements
in column-major, Fortran-style order, with the
first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of
the memory layout of the underlying array, and only refer to
the order of axis indexing. 'A' means to read the elements in
Fortran-like index order if `a` is Fortran *contiguous* in
memory, C-like order otherwise. 'K' means to read the
elements in the order they occur in memory, except for
reversing the data when strides are negative. By default, 'C'
index order is used.
Returns
-------
y : array_like
If `a` is a matrix, y is a 1-D ndarray, otherwise y is an array of
the same subtype as `a`. The shape of the returned array is
``(a.size,)``. Matrices are special cased for backward
compatibility.
See Also
--------
ndarray.flat : 1-D iterator over an array.
ndarray.flatten : 1-D array copy of the elements of an array
in row-major order.
ndarray.reshape : Change the shape of an array without changing its data.
Notes
-----
In row-major, C-style order, in two dimensions, the row index
varies the slowest, and the column index the quickest. This can
be generalized to multiple dimensions, where row-major order
implies that the index along the first axis varies slowest, and
the index along the last quickest. The opposite holds for
column-major, Fortran-style index ordering.
When a view is desired in as many cases as possible, ``arr.reshape(-1)``
may be preferable.
Examples
--------
It is equivalent to ``reshape(-1, order=order)``.
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> print(np.ravel(x))
[1 2 3 4 5 6]
>>> print(x.reshape(-1))
[1 2 3 4 5 6]
>>> print(np.ravel(x, order='F'))
[1 4 2 5 3 6]
When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering:
>>> print(np.ravel(x.T))
[1 4 2 5 3 6]
>>> print(np.ravel(x.T, order='A'))
[1 2 3 4 5 6]
When ``order`` is 'K', it will preserve orderings that are neither 'C'
nor 'F', but won't reverse axes:
>>> a = np.arange(3)[::-1]; a
array([2, 1, 0])
>>> a.ravel(order='C')
array([2, 1, 0])
>>> a.ravel(order='K')
array([2, 1, 0])
>>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a
array([[[ 0, 2, 4],
[ 1, 3, 5]],
[[ 6, 8, 10],
[ 7, 9, 11]]])
>>> a.ravel(order='C')
array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11])
>>> a.ravel(order='K')
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
"""
if isinstance(a, np.matrix):
return asarray(a).ravel(order=order)
else:
return asanyarray(a).ravel(order=order)
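# Illustrative sketch (not part of NumPy itself): ``order='K'`` reads elements
# in the order they sit in memory, whereas ``order='C'`` follows the axis
# indexing of the (here transposed) view. The helper name ``_demo_ravel_order``
# is hypothetical.
def _demo_ravel_order():
    import numpy as np
    x = np.arange(6).reshape(2, 3).T   # shape (3, 2), Fortran-contiguous view
    c_flat = np.ravel(x, order='C')    # index order:  [0, 3, 1, 4, 2, 5]
    k_flat = np.ravel(x, order='K')    # memory order: [0, 1, 2, 3, 4, 5]
    return c_flat, k_flat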
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`,
containing the indices of the non-zero elements in that
dimension. The values in `a` are always tested and returned in
row-major, C-style order. The corresponding non-zero
values can be obtained with::
a[nonzero(a)]
To group the indices by element, rather than dimension, use::
transpose(nonzero(a))
The result of this is always a 2-D array, with a row for
each non-zero element.
Parameters
----------
a : array_like
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Examples
--------
>>> x = np.array([[1,0,0], [0,2,0], [1,1,0]])
>>> x
array([[1, 0, 0],
[0, 2, 0],
[1, 1, 0]])
>>> np.nonzero(x)
(array([0, 1, 2, 2], dtype=int64), array([0, 1, 0, 1], dtype=int64))
    >>> x[np.nonzero(x)]
    array([1, 2, 1, 1])
    >>> np.transpose(np.nonzero(x))
    array([[0, 0],
           [1, 1],
           [2, 0],
           [2, 1]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, np.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
>>> a = np.array([[1,2,3],[4,5,6],[7,8,9]])
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]], dtype=bool)
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
The ``nonzero`` method of the boolean array can also be called.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return _wrapfunc(a, 'nonzero')
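# Illustrative sketch (not part of NumPy itself): a typical use of
# ``np.nonzero`` is turning a boolean condition into coordinate arrays. The
# helper name ``_demo_nonzero_coords`` is hypothetical.
def _demo_nonzero_coords():
    import numpy as np
    a = np.array([[1, 0, 0], [0, 2, 0], [1, 1, 0]])
    rows, cols = np.nonzero(a)            # per-dimension index arrays
    coords = np.transpose(np.nonzero(a))  # grouped per element: shape (4, 2)
    return a[rows, cols], coords          # values: array([1, 2, 1, 1])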
def shape(a):
"""
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
alen
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 2]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
>>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
>>> np.shape(a)
(2,)
>>> a.shape
(2,)
"""
try:
result = a.shape
except AttributeError:
result = asarray(a).shape
return result
def compress(condition, a, axis=None, out=None):
"""
Return selected slices of an array along given axis.
When working along a given axis, a slice along that axis is returned in
`output` for each index where `condition` evaluates to True. When
working on a 1-D array, `compress` is equivalent to `extract`.
Parameters
----------
condition : 1-D array of bools
Array that selects which entries to return. If len(condition)
is less than the size of `a` along the given axis, then output is
truncated to the length of the condition array.
a : array_like
Array from which to extract a part.
axis : int, optional
Axis along which to take slices. If None (default), work on the
flattened array.
out : ndarray, optional
Output array. Its type is preserved and it must be of the right
shape to hold the output.
Returns
-------
compressed_array : ndarray
A copy of `a` without the slices along axis for which `condition`
is false.
See Also
--------
take, choose, diag, diagonal, select
ndarray.compress : Equivalent method in ndarray
np.extract: Equivalent method when working on 1-D arrays
numpy.doc.ufuncs : Section "Output arguments"
Examples
--------
>>> a = np.array([[1, 2], [3, 4], [5, 6]])
>>> a
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.compress([0, 1], a, axis=0)
array([[3, 4]])
>>> np.compress([False, True, True], a, axis=0)
array([[3, 4],
[5, 6]])
>>> np.compress([False, True], a, axis=1)
array([[2],
[4],
[6]])
Working on the flattened array does not return slices along an axis but
selects elements.
>>> np.compress([False, True], a)
array([2])
"""
return _wrapfunc(a, 'compress', condition, axis=axis, out=out)
def clip(a, a_min, a_max, out=None):
"""
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Parameters
----------
a : array_like
Array containing elements to clip.
a_min : scalar or array_like or `None`
Minimum value. If `None`, clipping is not performed on lower
interval edge. Not more than one of `a_min` and `a_max` may be
`None`.
a_max : scalar or array_like or `None`
Maximum value. If `None`, clipping is not performed on upper
interval edge. Not more than one of `a_min` and `a_max` may be
`None`. If `a_min` or `a_max` are array_like, then the three
arrays will be broadcasted to match their shapes.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
See Also
--------
numpy.doc.ufuncs : Section "Output arguments"
Examples
--------
>>> a = np.arange(10)
>>> np.clip(a, 1, 8)
array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, 3, 6, out=a)
array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8)
array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
"""
return _wrapfunc(a, 'clip', a_min, a_max, out=out)
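# Illustrative sketch (not part of NumPy itself): ``a_min``/``a_max`` may be
# array_like, in which case they broadcast against ``a``, and one of the two
# bounds may be None. The helper name ``_demo_clip_broadcast`` is hypothetical.
def _demo_clip_broadcast():
    import numpy as np
    a = np.arange(10)
    per_element_floor = np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8)
    only_upper = np.clip(a, None, 6)    # no lower bound, cap values at 6
    return per_element_floor, only_upper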
def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Sum of array elements over a given axis.
Parameters
----------
a : array_like
Elements to sum.
axis : None or int or tuple of ints, optional
Axis or axes along which a sum is performed. The default,
axis=None, will sum all of the elements of the input array. If
axis is negative it counts from the last to the first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a sum is performed on all of the axes
specified in the tuple instead of a single axis or all the axes as
before.
dtype : dtype, optional
The type of the returned array and of the accumulator in which the
elements are summed. The dtype of `a` is used by default unless `a`
has an integer dtype of less precision than the default platform
integer. In that case, if `a` is signed then the platform integer
is used while if `a` is unsigned then an unsigned integer of the
same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `sum` method of sub-classes of
`ndarray`, however any non-default value will be. If the
        sub-class's `sum` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
sum_along_axis : ndarray
An array with the same shape as `a`, with the specified
axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
See Also
--------
ndarray.sum : Equivalent method.
cumsum : Cumulative sum of array elements.
trapz : Integration of array values using the composite trapezoidal rule.
mean, average
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
The sum of an empty array is the neutral element 0:
>>> np.sum([])
0.0
Examples
--------
>>> np.sum([0.5, 1.5])
2.0
>>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
1
>>> np.sum([[0, 1], [0, 5]])
6
>>> np.sum([[0, 1], [0, 5]], axis=0)
array([0, 6])
>>> np.sum([[0, 1], [0, 5]], axis=1)
array([1, 5])
If the accumulator is too small, overflow occurs:
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
-128
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if isinstance(a, _gentype):
res = _sum_(a)
if out is not None:
out[...] = res
return out
return res
if type(a) is not mu.ndarray:
try:
sum = a.sum
except AttributeError:
pass
else:
return sum(axis=axis, dtype=dtype, out=out, **kwargs)
return _methods._sum(a, axis=axis, dtype=dtype,
out=out, **kwargs)
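# Illustrative sketch (not part of NumPy itself): ``keepdims=True`` keeps the
# reduced axis as length one, so the result broadcasts back against the input,
# which is convenient for normalising rows. The helper name
# ``_demo_sum_keepdims`` is hypothetical.
def _demo_sum_keepdims():
    import numpy as np
    a = np.array([[1., 3.], [2., 2.]])
    row_totals = np.sum(a, axis=1, keepdims=True)   # shape (2, 1)
    return a / row_totals                           # each row now sums to 1.0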
def product(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the product of array elements over a given axis.
See Also
--------
prod : equivalent function; see for details.
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
return um.multiply.reduce(a, axis=axis, dtype=dtype, out=out, **kwargs)
def sometrue(a, axis=None, out=None, keepdims=np._NoValue):
"""
Check whether some values are true.
Refer to `any` for full documentation.
See Also
--------
any : equivalent function
"""
arr = asanyarray(a)
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
return arr.any(axis=axis, out=out, **kwargs)
def alltrue(a, axis=None, out=None, keepdims=np._NoValue):
"""
Check if all elements of input array are true.
See Also
--------
numpy.all : Equivalent function; see for details.
"""
arr = asanyarray(a)
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
return arr.all(axis=axis, out=out, **kwargs)
def any(a, axis=None, out=None, keepdims=np._NoValue):
"""
Test whether any array element along a given axis evaluates to True.
Returns single boolean unless `axis` is not ``None``
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical OR reduction is performed.
The default (`axis` = `None`) is to perform a logical OR over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved
(e.g., if it is of type float, then it will remain so, returning
1.0 for True and 0.0 for False, regardless of the type of `a`).
See `doc.ufuncs` (Section "Output arguments") for details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `any` method of sub-classes of
`ndarray`, however any non-default value will be. If the
        sub-class's `any` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
any : bool or ndarray
A new boolean or `ndarray` is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.any : equivalent method
all : Test whether all elements along a given axis evaluate to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity evaluate
to `True` because these are not equal to zero.
Examples
--------
>>> np.any([[True, False], [True, True]])
True
>>> np.any([[True, False], [False, False]], axis=0)
array([ True, False], dtype=bool)
>>> np.any([-1, 0, 5])
True
>>> np.any(np.nan)
True
>>> o=np.array([False])
>>> z=np.any([-1, 4, 5], out=o)
>>> z, o
(array([ True], dtype=bool), array([ True], dtype=bool))
>>> # Check now that z is a reference to o
>>> z is o
True
>>> id(z), id(o) # identity of z and o # doctest: +SKIP
(191614240, 191614240)
"""
arr = asanyarray(a)
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
return arr.any(axis=axis, out=out, **kwargs)
def all(a, axis=None, out=None, keepdims=np._NoValue):
"""
Test whether all array elements along a given axis evaluate to True.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical AND reduction is performed.
The default (`axis` = `None`) is to perform a logical AND over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result.
It must have the same shape as the expected output and its
type is preserved (e.g., if ``dtype(out)`` is float, the result
will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section
"Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `all` method of sub-classes of
`ndarray`, however any non-default value will be. If the
        sub-class's `all` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
all : ndarray, bool
A new boolean or array is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.all : equivalent method
any : Test whether any element along a given axis evaluates to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to `True` because these are not equal to zero.
Examples
--------
>>> np.all([[True,False],[True,True]])
False
>>> np.all([[True,False],[True,True]], axis=0)
array([ True, False], dtype=bool)
>>> np.all([-1, 4, 5])
True
>>> np.all([1.0, np.nan])
True
>>> o=np.array([False])
>>> z=np.all([-1, 4, 5], out=o)
>>> id(z), id(o), z # doctest: +SKIP
(28293632, 28293632, array([ True], dtype=bool))
"""
arr = asanyarray(a)
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
return arr.all(axis=axis, out=out, **kwargs)
def cumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See `doc.ufuncs`
(Section "Output arguments") for more details.
Returns
-------
cumsum_along_axis : ndarray.
A new array holding the result is returned unless `out` is
specified, in which case a reference to `out` is returned. The
result has the same size as `a`, and the same shape as `a` if
`axis` is not None or `a` is a 1-d array.
See Also
--------
sum : Sum array elements.
trapz : Integration of array values using the composite trapezoidal rule.
diff : Calculate the n-th discrete difference along given axis.
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.cumsum(a)
array([ 1, 3, 6, 10, 15, 21])
>>> np.cumsum(a, dtype=float) # specifies type of output value(s)
array([ 1., 3., 6., 10., 15., 21.])
>>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
array([[1, 2, 3],
[5, 7, 9]])
>>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
array([[ 1, 3, 6],
[ 4, 9, 15]])
"""
return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out)
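# Illustrative sketch (not part of NumPy itself): a running mean can be built
# from ``np.cumsum`` by dividing the cumulative totals by the element count so
# far. The helper name ``_demo_running_mean`` is hypothetical.
def _demo_running_mean():
    import numpy as np
    x = np.array([1., 2., 3., 4.])
    running_mean = np.cumsum(x) / np.arange(1, x.size + 1)
    return running_mean    # array([1. , 1.5, 2. , 2.5])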
def cumproduct(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product over the given axis.
See Also
--------
cumprod : equivalent function; see for details.
"""
return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
def ptp(a, axis=None, out=None):
"""
Range of values (maximum - minimum) along an axis.
The name of the function comes from the acronym for 'peak to peak'.
Parameters
----------
a : array_like
Input values.
axis : int, optional
Axis along which to find the peaks. By default, flatten the
array.
out : array_like
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type of the output values will be cast if necessary.
Returns
-------
ptp : ndarray
A new array holding the result, unless `out` was
specified, in which case a reference to `out` is returned.
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.ptp(x, axis=0)
array([2, 2])
>>> np.ptp(x, axis=1)
array([1, 1])
"""
return _wrapfunc(a, 'ptp', axis=axis, out=out)
def amax(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which to operate. By default, flattened input is
used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the maximum is selected over multiple axes,
instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `amax` method of sub-classes of
`ndarray`, however any non-default value will be. If the
        sub-class's `amax` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
amax : ndarray or scalar
Maximum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amin :
The minimum value of an array along a given axis, propagating any NaNs.
nanmax :
The maximum value of an array along a given axis, ignoring any NaNs.
maximum :
Element-wise maximum of two arrays, propagating any NaNs.
fmax :
Element-wise maximum of two arrays, ignoring any NaNs.
argmax :
Return the indices of the maximum values.
nanmin, minimum, fmin
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding max value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmax.
Don't use `amax` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
``amax(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amax(a) # Maximum of the flattened array
3
>>> np.amax(a, axis=0) # Maxima along the first axis
array([2, 3])
>>> np.amax(a, axis=1) # Maxima along the second axis
array([1, 3])
>>> b = np.arange(5, dtype=np.float)
>>> b[2] = np.NaN
>>> np.amax(b)
nan
>>> np.nanmax(b)
4.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
amax = a.max
except AttributeError:
pass
else:
return amax(axis=axis, out=out, **kwargs)
return _methods._amax(a, axis=axis,
out=out, **kwargs)
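# Illustrative sketch (not part of NumPy itself): ``np.amax`` propagates NaNs,
# so use ``np.nanmax`` to ignore them, as the notes above recommend. The helper
# name ``_demo_amax_nan`` is hypothetical.
def _demo_amax_nan():
    import numpy as np
    b = np.array([0., 1., np.nan, 3., 4.])
    return np.amax(b), np.nanmax(b)    # (nan, 4.0)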
def amin(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which to operate. By default, flattened input is
used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the minimum is selected over multiple axes,
instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `amin` method of sub-classes of
`ndarray`, however any non-default value will be. If the
        sub-class's `amin` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
amin : ndarray or scalar
Minimum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amax :
The maximum value of an array along a given axis, propagating any NaNs.
nanmin :
The minimum value of an array along a given axis, ignoring any NaNs.
minimum :
Element-wise minimum of two arrays, propagating any NaNs.
fmin :
Element-wise minimum of two arrays, ignoring any NaNs.
argmin :
Return the indices of the minimum values.
nanmax, maximum, fmax
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding min value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmin.
Don't use `amin` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
``amin(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amin(a) # Minimum of the flattened array
0
>>> np.amin(a, axis=0) # Minima along the first axis
array([0, 1])
>>> np.amin(a, axis=1) # Minima along the second axis
array([0, 2])
>>> b = np.arange(5, dtype=np.float)
>>> b[2] = np.NaN
>>> np.amin(b)
nan
>>> np.nanmin(b)
0.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
amin = a.min
except AttributeError:
pass
else:
return amin(axis=axis, out=out, **kwargs)
return _methods._amin(a, axis=axis,
out=out, **kwargs)
def alen(a):
"""
Return the length of the first dimension of the input array.
Parameters
----------
a : array_like
Input array.
Returns
-------
alen : int
Length of the first dimension of `a`.
See Also
--------
shape, size
Examples
--------
>>> a = np.zeros((7,4,5))
>>> a.shape[0]
7
>>> np.alen(a)
7
"""
try:
return len(a)
except TypeError:
return len(array(a, ndmin=1))
def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the product of array elements over a given axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which a product is performed. The default,
axis=None, will calculate the product of all the elements in the
input array. If axis is negative it counts from the last to the
first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a product is performed on all of the
axes specified in the tuple instead of a single axis or all the
axes as before.
dtype : dtype, optional
The type of the returned array, as well as of the accumulator in
which the elements are multiplied. The dtype of `a` is used by
default unless `a` has an integer dtype of less precision than the
default platform integer. In that case, if `a` is signed then the
platform integer is used while if `a` is unsigned then an unsigned
integer of the same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `prod` method of sub-classes of
`ndarray`, however any non-default value will be. If the
        sub-class's `prod` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
An array shaped as `a` but with the specified axis removed.
Returns a reference to `out` if specified.
See Also
--------
ndarray.prod : equivalent method
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow. That means that, on a 32-bit platform:
>>> x = np.array([536870910, 536870910, 536870910, 536870910])
>>> np.prod(x) #random
16
The product of an empty array is the neutral element 1:
>>> np.prod([])
1.0
Examples
--------
By default, calculate the product of all elements:
>>> np.prod([1.,2.])
2.0
Even when the input array is two-dimensional:
>>> np.prod([[1.,2.],[3.,4.]])
24.0
But we can also specify the axis over which to multiply:
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
If the type of `x` is unsigned, then the output type is
the unsigned platform integer:
>>> x = np.array([1, 2, 3], dtype=np.uint8)
>>> np.prod(x).dtype == np.uint
True
If `x` is of a signed integer type, then the output type
is the default platform integer:
>>> x = np.array([1, 2, 3], dtype=np.int8)
>>> np.prod(x).dtype == np.int
True
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
prod = a.prod
except AttributeError:
pass
else:
return prod(axis=axis, dtype=dtype, out=out, **kwargs)
return _methods._prod(a, axis=axis, dtype=dtype,
out=out, **kwargs)
def cumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative product is computed. By default
the input is flattened.
dtype : dtype, optional
Type of the returned array, as well as of the accumulator in which
the elements are multiplied. If *dtype* is not specified, it
defaults to the dtype of `a`, unless `a` has an integer dtype with
a precision less than that of the default platform integer. In
that case, the default platform integer is used instead.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type of the resulting values will be cast if necessary.
Returns
-------
cumprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case a reference to out is returned.
See Also
--------
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([1,2,3])
>>> np.cumprod(a) # intermediate results 1, 1*2
... # total product 1*2*3 = 6
array([1, 2, 6])
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.cumprod(a, dtype=float) # specify type of output
array([ 1., 2., 6., 24., 120., 720.])
The cumulative product for each column (i.e., over the rows) of `a`:
>>> np.cumprod(a, axis=0)
array([[ 1, 2, 3],
[ 4, 10, 18]])
The cumulative product for each row (i.e. over the columns) of `a`:
>>> np.cumprod(a,axis=1)
array([[ 1, 2, 6],
[ 4, 20, 120]])
"""
return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
def ndim(a):
"""
Return the number of dimensions of an array.
Parameters
----------
a : array_like
Input array. If it is not already an ndarray, a conversion is
attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in `a`. Scalars are zero-dimensional.
See Also
--------
ndarray.ndim : equivalent method
shape : dimensions of array
ndarray.shape : dimensions of array
Examples
--------
>>> np.ndim([[1,2,3],[4,5,6]])
2
>>> np.ndim(np.array([[1,2,3],[4,5,6]]))
2
>>> np.ndim(1)
0
"""
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def rank(a):
"""
Return the number of dimensions of an array.
If `a` is not already an array, a conversion is attempted.
Scalars are zero dimensional.
.. note::
This function is deprecated in NumPy 1.9 to avoid confusion with
`numpy.linalg.matrix_rank`. The ``ndim`` attribute or function
should be used instead.
Parameters
----------
a : array_like
Array whose number of dimensions is desired. If `a` is not an array,
a conversion is attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in the array.
See Also
--------
ndim : equivalent function
ndarray.ndim : equivalent property
shape : dimensions of array
ndarray.shape : dimensions of array
Notes
-----
In the old Numeric package, `rank` was the term used for the number of
dimensions, but in NumPy `ndim` is used instead.
Examples
--------
>>> np.rank([1,2,3])
1
>>> np.rank(np.array([[1,2,3],[4,5,6]]))
2
>>> np.rank(1)
0
"""
# 2014-04-12, 1.9
warnings.warn(
"`rank` is deprecated; use the `ndim` attribute or function instead. "
"To find the rank of a matrix see `numpy.linalg.matrix_rank`.",
VisibleDeprecationWarning, stacklevel=2)
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def size(a, axis=None):
"""
Return the number of elements along a given axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which the elements are counted. By default, give
the total number of elements.
Returns
-------
element_count : int
Number of elements along the specified axis.
See Also
--------
shape : dimensions of array
ndarray.shape : dimensions of array
ndarray.size : number of elements in array
Examples
--------
>>> a = np.array([[1,2,3],[4,5,6]])
>>> np.size(a)
6
>>> np.size(a,1)
3
>>> np.size(a,0)
2
"""
if axis is None:
try:
return a.size
except AttributeError:
return asarray(a).size
else:
try:
return a.shape[axis]
except AttributeError:
return asarray(a).shape[axis]
def around(a, decimals=0, out=None):
"""
Evenly round to the given number of decimals.
Parameters
----------
a : array_like
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary. See `doc.ufuncs` (Section
"Output arguments") for details.
Returns
-------
rounded_array : ndarray
An array of the same type as `a`, containing the rounded values.
Unless `out` was specified, a new array is created. A reference to
the result is returned.
The real and imaginary parts of complex numbers are rounded
separately. The result of rounding a float is a float.
See Also
--------
ndarray.round : equivalent method
ceil, fix, floor, rint, trunc
Notes
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc. Results may also be surprising due
to the inexact representation of decimal fractions in the IEEE
floating point standard [1]_ and errors introduced when scaling
by powers of ten.
References
----------
.. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
http://www.cs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
.. [2] "How Futile are Mindless Assessments of
Roundoff in Floating-Point Computation?", William Kahan,
http://www.cs.berkeley.edu/~wkahan/Mindless.pdf
Examples
--------
>>> np.around([0.37, 1.64])
array([ 0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([ 0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([ 0., 2., 2., 4., 4.])
>>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1,2,3,11], decimals=-1)
array([ 0, 0, 0, 10])
"""
return _wrapfunc(a, 'round', decimals=decimals, out=out)
def round_(a, decimals=0, out=None):
"""
Round an array to the given number of decimals.
Refer to `around` for full documentation.
See Also
--------
around : equivalent function
"""
return around(a, decimals=decimals, out=out)
def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken over
the flattened array by default, otherwise over the specified axis.
`float64` intermediate and return values are used for integer inputs.
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a mean is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the
input dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
See `doc.ufuncs` for details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `mean` method of sub-classes of
`ndarray`, however any non-default value will be. If the
        sub-class's `mean` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
m : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
See Also
--------
average : Weighted average
std, var, nanmean, nanstd, nanvar
Notes
-----
The arithmetic mean is the sum of the elements along the axis divided
by the number of elements.
Note that for floating-point input, the mean is computed using the
same precision the input has. Depending on the input data, this can
cause the results to be inaccurate, especially for `float32` (see
example below). Specifying a higher-precision accumulator using the
`dtype` keyword can alleviate this issue.
By default, `float16` results are computed using `float32` intermediates
for extra precision.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
2.5
>>> np.mean(a, axis=0)
array([ 2., 3.])
>>> np.mean(a, axis=1)
array([ 1.5, 3.5])
In single precision, `mean` can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.mean(a)
0.54999924
Computing the mean in float64 is more accurate:
>>> np.mean(a, dtype=np.float64)
0.55000000074505806
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
mean = a.mean
except AttributeError:
pass
else:
return mean(axis=axis, dtype=dtype, out=out, **kwargs)
return _methods._mean(a, axis=axis, dtype=dtype,
out=out, **kwargs)
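# Illustrative sketch (not part of NumPy itself): for large float32 inputs the
# accumulator precision matters; passing ``dtype=np.float64`` gives a more
# accurate mean, as shown in the docstring above. The helper name
# ``_demo_mean_precision`` is hypothetical.
def _demo_mean_precision():
    import numpy as np
    a = np.zeros((2, 512 * 512), dtype=np.float32)
    a[0, :] = 1.0
    a[1, :] = 0.1
    return np.mean(a), np.mean(a, dtype=np.float64)   # ~0.54999924 vs ~0.55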
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : array_like
Calculate the standard deviation of these values.
axis : None or int or tuple of ints, optional
Axis or axes along which the standard deviation is computed. The
default is to compute the standard deviation of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a standard deviation is performed over
multiple axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is
the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `std` method of sub-classes of
`ndarray`, however any non-default value will be. If the
        sub-class's `std` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
See Also
--------
var, mean, nanmean, nanstd, nanvar
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``.
The average squared deviation is normally calculated as
``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified,
the divisor ``N - ddof`` is used instead. In standard statistical
practice, ``ddof=1`` provides an unbiased estimator of the variance
of the infinite population. ``ddof=0`` provides a maximum likelihood
estimate of the variance for normally distributed variables. The
standard deviation computed in this function is the square root of
the estimated variance, so even with ``ddof=1``, it will not be an
unbiased estimate of the standard deviation per se.
Note that, for complex numbers, `std` takes the absolute
value before squaring, so that the result is always real and nonnegative.
For floating-point input, the *std* is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949
>>> np.std(a, axis=0)
array([ 1., 1.])
>>> np.std(a, axis=1)
array([ 0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.std(a)
0.45000005
Computing the standard deviation in float64 is more accurate:
>>> np.std(a, dtype=np.float64)
0.44999999925494177
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
std = a.std
except AttributeError:
pass
else:
return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the variance is computed. The default is to
compute the variance of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a variance is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
        the default is `float64`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `var` method of sub-classes of
`ndarray`, however any non-default value will be. If the
        sub-class's `var` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
variance : ndarray, see dtype parameter above
If ``out=None``, returns a new array containing the variance;
otherwise, a reference to the output array is returned.
See Also
--------
std , mean, nanmean, nanstd, nanvar
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The variance is the average of the squared deviations from the mean,
i.e., ``var = mean(abs(x - x.mean())**2)``.
The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
If, however, `ddof` is specified, the divisor ``N - ddof`` is used
instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of a hypothetical infinite population.
``ddof=0`` provides a maximum likelihood estimate of the variance for
normally distributed variables.
Note that for complex numbers, the absolute value is taken before
squaring, so that the result is always real and nonnegative.
For floating-point input, the variance is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for `float32` (see example
below). Specifying a higher-accuracy accumulator using the ``dtype``
keyword can alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.var(a)
1.25
>>> np.var(a, axis=0)
array([ 1., 1.])
>>> np.var(a, axis=1)
array([ 0.25, 0.25])
In single precision, var() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.var(a)
0.20250003
Computing the variance in float64 is more accurate:
>>> np.var(a, dtype=np.float64)
0.20249999932944759
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.2025
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
var = a.var
except AttributeError:
pass
else:
return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)
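# Illustrative sketch (not part of the original source): `ddof` only changes
# the divisor, so for the four values 1, 2, 3, 4 (mean 2.5, squared deviations
# summing to 5):
# >>> np.var([1, 2, 3, 4])          # divisor N = 4
# 1.25
# >>> np.var([1, 2, 3, 4], ddof=1)  # divisor N - 1 = 3
# 1.6666666666666667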
| bsd-3-clause |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/pandas/computation/expr.py | 9 | 25483 | """:func:`~pandas.eval` parsers
"""
import ast
import operator
import sys
import inspect
import tokenize
import datetime
from functools import partial
import pandas as pd
from pandas import compat
from pandas.compat import StringIO, lmap, zip, reduce, string_types
from pandas.core.base import StringMixin
from pandas.core import common as com
from pandas.tools.util import compose
from pandas.computation.ops import (_cmp_ops_syms, _bool_ops_syms,
_arith_ops_syms, _unary_ops_syms, is_term)
from pandas.computation.ops import _reductions, _mathops, _LOCAL_TAG
from pandas.computation.ops import Op, BinOp, UnaryOp, Term, Constant, Div
from pandas.computation.ops import UndefinedVariableError, FuncNode
from pandas.computation.scope import Scope, _ensure_scope
def tokenize_string(source):
"""Tokenize a Python source code string.
Parameters
----------
source : str
A Python source code string
"""
line_reader = StringIO(source).readline
for toknum, tokval, _, _, _ in tokenize.generate_tokens(line_reader):
yield toknum, tokval
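# Illustrative sketch (not part of the original source): the generator simply
# relays (toknum, tokval) pairs from the stdlib tokenizer, so a string such as
# "a + 1" comes back roughly as NAME/'a', OP/'+', NUMBER/'1', followed by the
# bookkeeping NEWLINE/ENDMARKER tokens that tokenize.generate_tokens appends.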
def _rewrite_assign(tok):
"""Rewrite the assignment operator for PyTables expressions that use ``=``
as a substitute for ``==``.
Parameters
----------
tok : tuple of int, str
ints correspond to the all caps constants in the tokenize module
Returns
-------
t : tuple of int, str
Either the input or token or the replacement values
"""
toknum, tokval = tok
return toknum, '==' if tokval == '=' else tokval
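# Illustrative sketch (not part of the original source): a lone '=' is turned
# into '==', anything else passes through untouched, e.g.
# >>> _rewrite_assign((tokenize.OP, '='))[1]
# '=='
# >>> _rewrite_assign((tokenize.OP, '<'))[1]
# '<'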
def _replace_booleans(tok):
"""Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
precedence is changed to boolean precedence.
Parameters
----------
tok : tuple of int, str
ints correspond to the all caps constants in the tokenize module
Returns
-------
t : tuple of int, str
Either the input or token or the replacement values
"""
toknum, tokval = tok
if toknum == tokenize.OP:
if tokval == '&':
return tokenize.NAME, 'and'
elif tokval == '|':
return tokenize.NAME, 'or'
return toknum, tokval
return toknum, tokval
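# Illustrative sketch (not part of the original source): the bitwise operators
# are swapped for their boolean keywords so they pick up Python's (lower)
# boolean precedence, e.g.
# >>> _replace_booleans((tokenize.OP, '&'))[1]
# 'and'
# >>> _replace_booleans((tokenize.OP, '|'))[1]
# 'or'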
def _replace_locals(tok):
"""Replace local variables with a syntactically valid name.
Parameters
----------
tok : tuple of int, str
ints correspond to the all caps constants in the tokenize module
Returns
-------
t : tuple of int, str
Either the input or token or the replacement values
Notes
-----
This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as
``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_``
is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it.
"""
toknum, tokval = tok
if toknum == tokenize.OP and tokval == '@':
return tokenize.OP, _LOCAL_TAG
return toknum, tokval
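# Illustrative sketch (not part of the original source): only the '@' marker is
# rewritten; the replacement is whatever prefix _LOCAL_TAG holds, e.g.
# >>> _replace_locals((tokenize.OP, '@'))[1] == _LOCAL_TAG
# True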
def _preparse(source, f=compose(_replace_locals, _replace_booleans,
_rewrite_assign)):
"""Compose a collection of tokenization functions
Parameters
----------
source : str
A Python source code string
f : callable
This takes a tuple of (toknum, tokval) as its argument and returns a
tuple with the same structure but possibly different elements. Defaults
to the composition of ``_rewrite_assign``, ``_replace_booleans``, and
``_replace_locals``.
Returns
-------
s : str
Valid Python source code
Notes
-----
The `f` parameter can be any callable that takes *and* returns input of the
form ``(toknum, tokval)``, where ``toknum`` is one of the constants from
the ``tokenize`` module and ``tokval`` is a string.
"""
assert callable(f), 'f must be callable'
return tokenize.untokenize(lmap(f, tokenize_string(source)))
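# Illustrative sketch (not part of the original source): with the default
# pipeline, an expression such as "@a & b" is re-emitted with the local marker
# expanded and '&' replaced, giving roughly "__pd_eval_local_a and b" (the
# exact spacing depends on tokenize.untokenize).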
def _is_type(t):
"""Factory for a type checking function of type ``t`` or tuple of types."""
return lambda x: isinstance(x.value, t)
_is_list = _is_type(list)
_is_str = _is_type(string_types)
# partition all AST nodes
_all_nodes = frozenset(filter(lambda x: isinstance(x, type) and
issubclass(x, ast.AST),
(getattr(ast, node) for node in dir(ast))))
def _filter_nodes(superclass, all_nodes=_all_nodes):
"""Filter out AST nodes that are subclasses of ``superclass``."""
node_names = (node.__name__ for node in all_nodes
if issubclass(node, superclass))
return frozenset(node_names)
_all_node_names = frozenset(map(lambda x: x.__name__, _all_nodes))
_mod_nodes = _filter_nodes(ast.mod)
_stmt_nodes = _filter_nodes(ast.stmt)
_expr_nodes = _filter_nodes(ast.expr)
_expr_context_nodes = _filter_nodes(ast.expr_context)
_slice_nodes = _filter_nodes(ast.slice)
_boolop_nodes = _filter_nodes(ast.boolop)
_operator_nodes = _filter_nodes(ast.operator)
_unary_op_nodes = _filter_nodes(ast.unaryop)
_cmp_op_nodes = _filter_nodes(ast.cmpop)
_comprehension_nodes = _filter_nodes(ast.comprehension)
_handler_nodes = _filter_nodes(ast.excepthandler)
_arguments_nodes = _filter_nodes(ast.arguments)
_keyword_nodes = _filter_nodes(ast.keyword)
_alias_nodes = _filter_nodes(ast.alias)
# nodes that we don't support directly but are needed for parsing
_hacked_nodes = frozenset(['Assign', 'Module', 'Expr'])
_unsupported_expr_nodes = frozenset(['Yield', 'GeneratorExp', 'IfExp',
'DictComp', 'SetComp', 'Repr', 'Lambda',
'Set', 'AST', 'Is', 'IsNot'])
# these nodes are low priority or won't ever be supported (e.g., AST)
_unsupported_nodes = ((_stmt_nodes | _mod_nodes | _handler_nodes |
_arguments_nodes | _keyword_nodes | _alias_nodes |
_expr_context_nodes | _unsupported_expr_nodes) -
_hacked_nodes)
# we're adding a different assignment in some cases to be equality comparison
# and we don't want `stmt` and friends in there, so get only the classes whose
# names are capitalized
_base_supported_nodes = (_all_node_names - _unsupported_nodes) | _hacked_nodes
_msg = 'cannot both support and not support {0}'.format(_unsupported_nodes &
_base_supported_nodes)
assert not _unsupported_nodes & _base_supported_nodes, _msg
def _node_not_implemented(node_name, cls):
"""Return a function that raises a NotImplementedError with a passed node
name.
"""
def f(self, *args, **kwargs):
raise NotImplementedError("{0!r} nodes are not "
"implemented".format(node_name))
return f
def disallow(nodes):
"""Decorator to disallow certain nodes from parsing. Raises a
NotImplementedError instead.
Returns
-------
disallowed : callable
"""
def disallowed(cls):
cls.unsupported_nodes = ()
for node in nodes:
new_method = _node_not_implemented(node, cls)
name = 'visit_{0}'.format(node)
cls.unsupported_nodes += (name,)
setattr(cls, name, new_method)
return cls
return disallowed
def _op_maker(op_class, op_symbol):
"""Return a function to create an op class with its symbol already passed.
Returns
-------
f : callable
"""
def f(self, node, *args, **kwargs):
"""Return a partial function with an Op subclass with an operator
already passed.
Returns
-------
f : callable
"""
return partial(op_class, op_symbol, *args, **kwargs)
return f
_op_classes = {'binary': BinOp, 'unary': UnaryOp}
def add_ops(op_classes):
"""Decorator to add default implementation of ops."""
def f(cls):
for op_attr_name, op_class in compat.iteritems(op_classes):
ops = getattr(cls, '{0}_ops'.format(op_attr_name))
ops_map = getattr(cls, '{0}_op_nodes_map'.format(op_attr_name))
for op in ops:
op_node = ops_map[op]
if op_node is not None:
made_op = _op_maker(op_class, op)
setattr(cls, 'visit_{0}'.format(op_node), made_op)
return cls
return f
@disallow(_unsupported_nodes)
@add_ops(_op_classes)
class BaseExprVisitor(ast.NodeVisitor):
"""Custom ast walker. Parsers of other engines should subclass this class
if necessary.
Parameters
----------
env : Scope
engine : str
parser : str
preparser : callable
"""
const_type = Constant
term_type = Term
binary_ops = _cmp_ops_syms + _bool_ops_syms + _arith_ops_syms
binary_op_nodes = ('Gt', 'Lt', 'GtE', 'LtE', 'Eq', 'NotEq', 'In', 'NotIn',
'BitAnd', 'BitOr', 'And', 'Or', 'Add', 'Sub', 'Mult',
None, 'Pow', 'FloorDiv', 'Mod')
binary_op_nodes_map = dict(zip(binary_ops, binary_op_nodes))
unary_ops = _unary_ops_syms
unary_op_nodes = 'UAdd', 'USub', 'Invert', 'Not'
unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes))
rewrite_map = {
ast.Eq: ast.In,
ast.NotEq: ast.NotIn,
ast.In: ast.In,
ast.NotIn: ast.NotIn
}
def __init__(self, env, engine, parser, preparser=_preparse):
self.env = env
self.engine = engine
self.parser = parser
self.preparser = preparser
self.assigner = None
def visit(self, node, **kwargs):
if isinstance(node, string_types):
clean = self.preparser(node)
node = ast.fix_missing_locations(ast.parse(clean))
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method)
return visitor(node, **kwargs)
def visit_Module(self, node, **kwargs):
if len(node.body) != 1:
raise SyntaxError('only a single expression is allowed')
expr = node.body[0]
return self.visit(expr, **kwargs)
def visit_Expr(self, node, **kwargs):
return self.visit(node.value, **kwargs)
def _rewrite_membership_op(self, node, left, right):
# the kind of the operator (is actually an instance)
op_instance = node.op
op_type = type(op_instance)
# must be two terms and the comparison operator must be ==/!=/in/not in
if is_term(left) and is_term(right) and op_type in self.rewrite_map:
left_list, right_list = map(_is_list, (left, right))
left_str, right_str = map(_is_str, (left, right))
# if there are any strings or lists in the expression
if left_list or right_list or left_str or right_str:
op_instance = self.rewrite_map[op_type]()
# pop the string variable out of locals and replace it with a list
# of one string, kind of a hack
if right_str:
name = self.env.add_tmp([right.value])
right = self.term_type(name, self.env)
if left_str:
name = self.env.add_tmp([left.value])
left = self.term_type(name, self.env)
op = self.visit(op_instance)
return op, op_instance, left, right
def _possibly_transform_eq_ne(self, node, left=None, right=None):
if left is None:
left = self.visit(node.left, side='left')
if right is None:
right = self.visit(node.right, side='right')
op, op_class, left, right = self._rewrite_membership_op(node, left,
right)
return op, op_class, left, right
def _possibly_eval(self, binop, eval_in_python):
# eval `in` and `not in` (for now) in "partial" python space
# things that can be evaluated in "eval" space will be turned into
# temporary variables. for example,
# [1,2] in a + 2 * b
# in that case a + 2 * b will be evaluated using numexpr, and the "in"
# call will be evaluated using isin (in python space)
return binop.evaluate(self.env, self.engine, self.parser,
self.term_type, eval_in_python)
def _possibly_evaluate_binop(self, op, op_class, lhs, rhs,
eval_in_python=('in', 'not in'),
maybe_eval_in_python=('==', '!=', '<', '>',
'<=', '>=')):
res = op(lhs, rhs)
if res.has_invalid_return_type:
raise TypeError("unsupported operand type(s) for {0}:"
" '{1}' and '{2}'".format(res.op, lhs.type,
rhs.type))
if self.engine != 'pytables':
if (res.op in _cmp_ops_syms
and getattr(lhs, 'is_datetime', False)
or getattr(rhs, 'is_datetime', False)):
# all date ops must be done in python bc numexpr doesn't work
# well with NaT
return self._possibly_eval(res, self.binary_ops)
if res.op in eval_in_python:
# "in"/"not in" ops are always evaluated in python
return self._possibly_eval(res, eval_in_python)
elif self.engine != 'pytables':
if (getattr(lhs, 'return_type', None) == object
or getattr(rhs, 'return_type', None) == object):
# evaluate "==" and "!=" in python if either of our operands
# has an object return type
return self._possibly_eval(res, eval_in_python +
maybe_eval_in_python)
return res
def visit_BinOp(self, node, **kwargs):
op, op_class, left, right = self._possibly_transform_eq_ne(node)
return self._possibly_evaluate_binop(op, op_class, left, right)
def visit_Div(self, node, **kwargs):
truediv = self.env.scope['truediv']
return lambda lhs, rhs: Div(lhs, rhs, truediv)
def visit_UnaryOp(self, node, **kwargs):
op = self.visit(node.op)
operand = self.visit(node.operand)
return op(operand)
def visit_Name(self, node, **kwargs):
return self.term_type(node.id, self.env, **kwargs)
def visit_NameConstant(self, node, **kwargs):
return self.const_type(node.value, self.env)
def visit_Num(self, node, **kwargs):
return self.const_type(node.n, self.env)
def visit_Str(self, node, **kwargs):
name = self.env.add_tmp(node.s)
return self.term_type(name, self.env)
def visit_List(self, node, **kwargs):
name = self.env.add_tmp([self.visit(e)(self.env) for e in node.elts])
return self.term_type(name, self.env)
visit_Tuple = visit_List
def visit_Index(self, node, **kwargs):
""" df.index[4] """
return self.visit(node.value)
def visit_Subscript(self, node, **kwargs):
value = self.visit(node.value)
slobj = self.visit(node.slice)
result = pd.eval(slobj, local_dict=self.env, engine=self.engine,
parser=self.parser)
try:
# a Term instance
v = value.value[result]
except AttributeError:
# an Op instance
lhs = pd.eval(value, local_dict=self.env, engine=self.engine,
parser=self.parser)
v = lhs[result]
name = self.env.add_tmp(v)
return self.term_type(name, env=self.env)
def visit_Slice(self, node, **kwargs):
""" df.index[slice(4,6)] """
lower = node.lower
if lower is not None:
lower = self.visit(lower).value
upper = node.upper
if upper is not None:
upper = self.visit(upper).value
step = node.step
if step is not None:
step = self.visit(step).value
return slice(lower, upper, step)
def visit_Assign(self, node, **kwargs):
"""
support a single assignment node, like
c = a + b
set the assigner at the top level, must be a Name node which
might or might not exist in the resolvers
"""
if len(node.targets) != 1:
raise SyntaxError('can only assign a single expression')
if not isinstance(node.targets[0], ast.Name):
raise SyntaxError('left hand side of an assignment must be a '
'single name')
if self.env.target is None:
raise ValueError('cannot assign without a target object')
try:
assigner = self.visit(node.targets[0], **kwargs)
except UndefinedVariableError:
assigner = node.targets[0].id
self.assigner = getattr(assigner, 'name', assigner)
if self.assigner is None:
raise SyntaxError('left hand side of an assignment must be a '
'single resolvable name')
return self.visit(node.value, **kwargs)
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = node.ctx
if isinstance(ctx, ast.Load):
# resolve the value
resolved = self.visit(value).value
try:
v = getattr(resolved, attr)
name = self.env.add_tmp(v)
return self.term_type(name, self.env)
except AttributeError:
# something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError("Invalid Attribute context {0}".format(ctx.__name__))
def visit_Call_35(self, node, side=None, **kwargs):
""" in 3.5 the starargs attribute was changed to be more flexible, #11097 """
if isinstance(node.func, ast.Attribute):
res = self.visit_Attribute(node.func)
elif not isinstance(node.func, ast.Name):
raise TypeError("Only named functions are supported")
else:
try:
res = self.visit(node.func)
except UndefinedVariableError:
# Check if this is a supported function name
try:
res = FuncNode(node.func.id)
except ValueError:
# Raise original error
raise
if res is None:
raise ValueError("Invalid function call {0}".format(node.func.id))
if hasattr(res, 'value'):
res = res.value
if isinstance(res, FuncNode):
new_args = [ self.visit(arg) for arg in node.args ]
if node.keywords:
raise TypeError("Function \"{0}\" does not support keyword "
"arguments".format(res.name))
return res(*new_args, **kwargs)
else:
new_args = [ self.visit(arg).value for arg in node.args ]
for key in node.keywords:
if not isinstance(key, ast.keyword):
raise ValueError("keyword error in function call "
"'{0}'".format(node.func.id))
if key.arg:
                    kwargs[key.arg] = self.visit(key.value).value
return self.const_type(res(*new_args, **kwargs), self.env)
def visit_Call_legacy(self, node, side=None, **kwargs):
# this can happen with: datetime.datetime
if isinstance(node.func, ast.Attribute):
res = self.visit_Attribute(node.func)
elif not isinstance(node.func, ast.Name):
raise TypeError("Only named functions are supported")
else:
try:
res = self.visit(node.func)
except UndefinedVariableError:
# Check if this is a supported function name
try:
res = FuncNode(node.func.id)
except ValueError:
# Raise original error
raise
if res is None:
raise ValueError("Invalid function call {0}".format(node.func.id))
if hasattr(res, 'value'):
res = res.value
if isinstance(res, FuncNode):
args = [self.visit(targ) for targ in node.args]
if node.starargs is not None:
args += self.visit(node.starargs)
if node.keywords or node.kwargs:
raise TypeError("Function \"{0}\" does not support keyword "
"arguments".format(res.name))
return res(*args, **kwargs)
else:
args = [self.visit(targ).value for targ in node.args]
if node.starargs is not None:
args += self.visit(node.starargs).value
keywords = {}
for key in node.keywords:
if not isinstance(key, ast.keyword):
raise ValueError("keyword error in function call "
"'{0}'".format(node.func.id))
keywords[key.arg] = self.visit(key.value).value
if node.kwargs is not None:
keywords.update(self.visit(node.kwargs).value)
return self.const_type(res(*args, **keywords), self.env)
def translate_In(self, op):
return op
def visit_Compare(self, node, **kwargs):
ops = node.ops
comps = node.comparators
# base case: we have something like a CMP b
if len(comps) == 1:
op = self.translate_In(ops[0])
binop = ast.BinOp(op=op, left=node.left, right=comps[0])
return self.visit(binop)
# recursive case: we have a chained comparison, a CMP b CMP c, etc.
left = node.left
values = []
for op, comp in zip(ops, comps):
new_node = self.visit(ast.Compare(comparators=[comp], left=left,
ops=[self.translate_In(op)]))
left = comp
values.append(new_node)
return self.visit(ast.BoolOp(op=ast.And(), values=values))
def _try_visit_binop(self, bop):
if isinstance(bop, (Op, Term)):
return bop
return self.visit(bop)
def visit_BoolOp(self, node, **kwargs):
def visitor(x, y):
lhs = self._try_visit_binop(x)
rhs = self._try_visit_binop(y)
op, op_class, lhs, rhs = self._possibly_transform_eq_ne(node, lhs,
rhs)
return self._possibly_evaluate_binop(op, node.op, lhs, rhs)
operands = node.values
return reduce(visitor, operands)
# ast.Call signature changed on 3.5,
# conditionally change which methods is named
# visit_Call depending on Python version, #11097
if compat.PY35:
BaseExprVisitor.visit_Call = BaseExprVisitor.visit_Call_35
else:
BaseExprVisitor.visit_Call = BaseExprVisitor.visit_Call_legacy
_python_not_supported = frozenset(['Dict', 'BoolOp', 'In', 'NotIn'])
_numexpr_supported_calls = frozenset(_reductions + _mathops)
@disallow((_unsupported_nodes | _python_not_supported) -
(_boolop_nodes | frozenset(['BoolOp', 'Attribute', 'In', 'NotIn',
'Tuple'])))
class PandasExprVisitor(BaseExprVisitor):
def __init__(self, env, engine, parser,
preparser=partial(_preparse, f=compose(_replace_locals,
_replace_booleans))):
super(PandasExprVisitor, self).__init__(env, engine, parser, preparser)
@disallow(_unsupported_nodes | _python_not_supported | frozenset(['Not']))
class PythonExprVisitor(BaseExprVisitor):
def __init__(self, env, engine, parser, preparser=lambda x: x):
super(PythonExprVisitor, self).__init__(env, engine, parser,
preparser=preparser)
class Expr(StringMixin):
"""Object encapsulating an expression.
Parameters
----------
expr : str
engine : str, optional, default 'numexpr'
parser : str, optional, default 'pandas'
env : Scope, optional, default None
truediv : bool, optional, default True
level : int, optional, default 2
"""
def __init__(self, expr, engine='numexpr', parser='pandas', env=None,
truediv=True, level=0):
self.expr = expr
self.env = env or Scope(level=level + 1)
self.engine = engine
self.parser = parser
self.env.scope['truediv'] = truediv
self._visitor = _parsers[parser](self.env, self.engine, self.parser)
self.terms = self.parse()
@property
def assigner(self):
return getattr(self._visitor, 'assigner', None)
def __call__(self):
return self.terms(self.env)
def __unicode__(self):
return com.pprint_thing(self.terms)
def __len__(self):
return len(self.expr)
def parse(self):
"""Parse an expression"""
return self._visitor.visit(self.expr)
@property
def names(self):
"""Get the names in an expression"""
if is_term(self.terms):
return frozenset([self.terms.name])
return frozenset(term.name for term in com.flatten(self.terms))
_parsers = {'python': PythonExprVisitor, 'pandas': PandasExprVisitor}
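# Illustrative sketch (not part of the original source): these visitors are
# normally driven through pandas.eval rather than instantiated directly, e.g.
# >>> import pandas as pd
# >>> pd.eval("2 + 3 * 4")
# 14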
| artistic-2.0 |
willettk/blazar_clustering | python/allskyf19.py | 1 | 4676 | from kapteyn import maputils, tabarray
from matplotlib import pyplot as plt
import numpy
import sys
##################################################
def cylrange(epsilon):
X = numpy.arange(0,400.0,30.0);
# Replace last two (dummy) values by two values around 180 degrees
X[-1] = 180.0 - epsilon
X[-2] = 180.0 + epsilon
return X
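# Illustrative note (not part of the original script): for a tiny epsilon this
# yields the regular longitudes 0, 30, ..., 330 plus two values just above and
# below 180 degrees in place of the two dummy entries, so the all-sky graticule
# closes cleanly at the 180-degree border.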
##################################################
def doplot(frame, annim, grat, title,
lon_world=None, lat_world=None,
lon_constval=None, lat_constval=None,
lon_fmt=None, lat_fmt=None,
markerpos=None,
plotdata=False, perimeter=None, drawgrid=None,
smallversion=False, addangle0=0.0, addangle1=0.0,
framebgcolor=None, deltapx0=0.0, deltapy0=0.0,
deltapx1=0.0, deltapy1=0.0,
labkwargs0={'color':'r'}, labkwargs1={'color':'b'}):
# Apply some extra settings
if framebgcolor != None:
frame.set_axis_bgcolor(framebgcolor)
if lon_constval == None:
lon_constval = 0.0 # Reasonable for all sky plots
if lat_constval == None:
lat_constval = 0.0 # Reasonable for all sky plots
if lon_fmt == None:
lon_fmt = 'Dms'
if lat_fmt == None:
lat_fmt = 'Dms'
# Plot labels inside graticule if required
ilabs1 = grat.Insidelabels(wcsaxis=0,
world=lon_world, constval=lat_constval,
deltapx=deltapx0, deltapy=deltapy0,
addangle=addangle0, fmt=lon_fmt, **labkwargs0)
ilabs2 = grat.Insidelabels(wcsaxis=1,
world=lat_world, constval=lon_constval,
deltapx=deltapx1, deltapy=deltapy1,
addangle=addangle1, fmt=lat_fmt, **labkwargs1)
# Plot just 1 pixel c.q. marker
if markerpos != None:
annim.Marker(pos=markerpos, marker='o', color='red')
if drawgrid:
pixellabels = annim.Pixellabels(plotaxis=(2,3))
# Plot the title
if smallversion:
t = frame.set_title(title, color='g', fontsize=10)
else:
t = frame.set_title(title, color='g', fontsize=13, linespacing=1.5)
titlepos = 1.02
t.set_y(titlepos)
annim.plot()
# Plot alternative borders. Do this after the graticule is plotted
# Only then you know the frame of the graticule and plotting in that
# frame will overwrite graticule lines so that the borders look better
if perimeter != None:
p = plt.Polygon(perimeter, facecolor='#d6eaef', lw=2)
frame.add_patch(p) # Must be in frame specified by user
Xp, Yp = zip(*perimeter)
grat.frame.plot(Xp, Yp, color='r')
annim.interact_toolbarinfo()
plt.show()
###################################################################
# Set defaults
def make_allskyf19():
titlepos = 1.02
drawgrid = False
grat = None
plotbox = (0.1,0.05,0.8,0.8)
epsilon = 0.0000000001
fig = plt.figure(2,figsize=(4,4))
fig.clf()
frame = fig.add_axes(plotbox)
title = "BzCaT BL Lac sky distribution"
header = {'NAXIS' : 2,
'NAXIS1' : 100,
'NAXIS2' : 80,
'CTYPE1' : 'RA---AIT',
'CRVAL1' : 0.0,
'CRPIX1' : 50,
'CUNIT1' : 'deg',
'CDELT1' : -4.0,
'CTYPE2' : 'DEC--AIT',
'CRVAL2' : 0.0,
'CRPIX2' : 40,
'CUNIT2' : 'deg',
'CDELT2' : 4.0
}
X = cylrange(epsilon)
Y = numpy.arange(-60,90,30.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
grat.setp_lineswcs0(0, lw=2)
grat.setp_lineswcs1(0, lw=2)
lat_world = [-60,-30, 0, 30, 60]
# Remove the left 180 deg and print the right 180 deg instead
w1 = numpy.arange(0,151,30.0)
w2 = numpy.arange(210,360,30.0)
lon_world = numpy.concatenate((w1, w2))
labkwargs0 = {'color':'r', 'va':'bottom', 'ha':'right'}
labkwargs1 = {'color':'b', 'va':'bottom', 'ha':'right'}
# Load the positions of blazars from text file
fn = '/Users/willettk/Astronomy/Research/blazars/bzcat_bllac_dec_upload.txt'
fn = '/Users/willettk/Astronomy/Research/blazars/allblazars_remdup.cat'
xp, yp = annim.positionsfromfile(fn, 's', cols=[1,2])
annim.Marker(x=xp, y=yp, mode='pixels', marker='x', color='g')
# Set font sizes and colors of the grid labels
labkwargs0={'color':'r', 'fontsize':16, 'va':'baseline', 'ha':'right'}
labkwargs1={'color':'b', 'fontsize':16, 'va':'center', 'ha':'right'}
# Make the plot
doplot(frame, annim, grat, title,
lon_world=lon_world, lat_world=lat_world,
labkwargs0=labkwargs0, labkwargs1=labkwargs1)
| mit |
NicovincX2/Python-3.5 | Physique/Optique/Optique physique (ondulatoire)/Diffraction/diffraction_animation.py | 1 | 6959 | # -*- coding: utf-8 -*-
import os
"""
Simulation of a diffraction phenomenon through a rectangular aperture after
the arrival of a plane wave tilted by some angle theta0 with respect to the
normal.
The bottom graph shows the intensity profile (squared amplitude) on a screen
located at a fixed y.
Animated version, to show the effect of changing parameters such as the slit
width or the wavelength.
By adjusting dtrou_dt, the aperture width can be modified and its
instantaneous effect on the diffracted wave observed (no retardation effect)
[NB: if the aperture width tries to drop below 2*dx, the variation reverses
direction]
NB: the wider the aperture, the slower the computation, since secondary
sources have to be added and summed over the whole image.
NB2: the normalisation of the diffracted wave still needs to be revisited. For
now, a factor of 2 is used to highlight the phenomenon, but some thought is
needed to determine exactly what to use so that it is fully consistent.
"""
import numpy as np  # For convenient numerics
import matplotlib.pyplot as plt  # For the plots
from matplotlib.colors import LightSource  # For the relief ("3D") effect
from matplotlib import animation  # For the progressive animation
shading = False  # For a "3D effect"
k, w, epsilon = 5, 1, 1  # A few constants
c = w / k  # The wave speed
tmin, tmax = 50, 250  # The time interval under study
dt = 0.4  # The time step
ycut = 19  # The y position of the cut plane
vmin, vmax = -1, 1  # The extreme amplitude values
trou = 8  # The aperture width
dtrou_dt = -0.1
theta = 0.0  # The angle of incidence (in radians)
ext = 25.0  # The limits of the study window
dx, dy = 0.1, 0.1  # Resolution
x = np.arange(-ext, ext, dx)  # x axis
y = np.arange(ext, -4, -dy)  # and y axis (reversed because of imshow)
X, Y = np.meshgrid(x, y)  # to build the grid
# To set the window limits correctly.
xmin, xmax, ymin, ymax = np.amin(x), np.amax(x), np.amin(y), np.amax(y)
extent = xmin, xmax, ymin, ymax
base_name = 'PNG/S03_diffraction_'  # The default name
def point_source(x, y, t, x0=0, y0=0, theta=0):
    '''The function representing a source located at (x0, y0), produced by a
    plane wavefront tilted by theta.'''
u0 = front(
        x0, y0, t, theta)  # The wavefront value at the secondary source
    r = np.sqrt((x - x0)**2 + (y - y0)**2)  # The distance to the source
    u = u0 + k * r  # The displacement variable
    # (w*t is already included in u0)
    res = np.sin(u)  # Plain sine
    res[u > 0] = 0.0  # The front has not passed yet...
    # Multiplicative factor to make the pattern clearly visible...
return 2 * res
def front(x, y, t, theta=0):
    '''Definition of the plane wavefront line.
    At t=0, the wavefront passes through the point (0, ymin).'''
return k * (np.sin(theta) * x + np.cos(theta) * (y - ymin)) - w * t
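# Illustrative check (not part of the original script): at t = 0 the wavefront
# passes through (0, ymin), e.g. front(0.0, ymin, 0.0) evaluates to exactly 0.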
def onde_plane(x, y, t, theta=0):
    '''Function representing a plane wave making an angle theta with the
    normal. At t=0, the wavefront passes through the point (0, ymin).'''
    u = front(x, y, t, theta)
    res = np.sin(u)  # Plain sine
    # Make sure there is no wave for t < 0
    res[u > 0] = 0.0
return res
def superposition(x, y, t, largeur_trou, theta=0):
    '''Function automatically computing the superposition of the waves after
    they have passed through the aperture of width 'largeur_trou'.'''
    # Start by putting the plane wave everywhere.
res = onde_plane(x, y, t, theta)
    # Then correct the values for y > 0
x_trou = np.arange(-largeur_trou / 2, largeur_trou / 2, dx)
S = sum([point_source(x, y, t, xt, 0, theta)
for xt in x_trou]) / len(x_trou)
res[y > 0] = S[y > 0]
    print(t)  # A tiny bit of feedback
    return res  # and return the result to display
# for t in np.arange(tmin,tmax,dt): # Loop over time
if True:
t = 0
Z = superposition(X, Y, t, trou, theta)
    # Separate computation for the cut section.
x_trou = np.arange(-trou / 2, trou / 2, dx)
Zcut = (sum([point_source(x, ycut, t, xt, 0, theta)
for xt in x_trou]) / len(x_trou))**2
    # Open the figure and define the subplots
fig = plt.figure(figsize=(14, 10))
ax1 = plt.subplot2grid((3, 2), (0, 0), colspan=2, rowspan=2)
titre = plt.title(
'Diffraction par une ouverture plane, $t={}$'.format(round(t, 1)))
plt.ylabel('$y$')
plt.xlim((xmin, xmax))
plt.ylim((ymin, ymax))
if shading:
ls = LightSource(azdeg=20, altdeg=65) # create light source object.
# shade data, creating an rgb array.
rgb = ls.shade(Z, plt.cm.copper)
image = plt.imshow(rgb, extent=extent)
else:
image = plt.imshow(Z, interpolation='bilinear',
extent=extent, cmap='jet', vmin=vmin, vmax=vmax)
    # Add the two bars representing the walls
murg = plt.annotate('', xytext=(-ext, 0), xy=(-trou / 2, 0),
arrowprops=dict(facecolor='black', width=2, frac=0, headwidth=2))
murd = plt.annotate('', xytext=(ext, 0), xy=(trou / 2, 0),
arrowprops=dict(facecolor='black', width=2, headwidth=2, frac=0))
    plt.plot([-ext, ext], [ycut, ycut], '--k')  # and the location of the cut.
    # The bottom plot
ax2 = plt.subplot2grid((3, 2), (2, 0), colspan=2, sharex=ax1)
plt.xlabel('$x$')
plt.ylabel('Intensite\nSection $y={}$'.format(ycut))
plt.ylim((0, vmax**2))
section, = plt.plot(x, Zcut**2)
#plt.savefig(base_name + '{:04d}.png'.format(i))
# plt.close()
def init():
section.set_ydata([])
def animate(i):
global trou, dtrou_dt
t = i * dt + tmin
Z = superposition(X, Y, t, trou, theta)
trou += dtrou_dt * dt
if trou < 2 * dx:
dtrou_dt = - dtrou_dt
trou = 2 * dx
x_trou = np.arange(-trou / 2, trou / 2, dx)
murg.xy = (-trou / 2, 0)
murd.xy = (trou / 2, 0)
Zcut = (sum([point_source(x, ycut, t, xt, 0, theta)
for xt in x_trou]) / len(x_trou))**2
titre.set_text(
'Diffraction par une ouverture plane, $t={}$'.format(round(t, 1)))
if shading:
# shade data, creating an rgb array.
rgb = ls.shade(Z, plt.cm.copper)
image.set_data(rgb)
else:
image.set_data(Z)
section.set_ydata(Zcut**2)
# The animation itself
anim = animation.FuncAnimation(
fig, animate, frames=int((tmax - tmin) / dt), interval=20)
# Otherwise, show it live
plt.show()
os.system("pause")
| gpl-3.0 |
rseubert/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
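# Illustrative note (not part of the original example): grid_to_graph returns
# a sparse adjacency matrix of shape (n_pixels, n_pixels) linking each pixel
# to its grid neighbours; this is what restricts the Ward merges below to
# spatially contiguous regions.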
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/pandas/core/format.py | 9 | 92505 | # -*- coding: utf-8 -*-
from __future__ import print_function
from distutils.version import LooseVersion
# pylint: disable=W0141
import sys
from pandas.core.base import PandasObject
from pandas.core.common import adjoin, notnull
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas import compat
from pandas.compat import(StringIO, lzip, range, map, zip, reduce, u,
OrderedDict)
from pandas.util.terminal import get_terminal_size
from pandas.core.config import get_option, set_option
from pandas.io.common import _get_handle, UnicodeWriter, _expand_user
import pandas.core.common as com
import pandas.lib as lib
from pandas.tslib import iNaT, Timestamp, Timedelta, format_array_from_datetime
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex
import pandas as pd
import numpy as np
import itertools
import csv
import warnings
common_docstring = """
Parameters
----------
buf : StringIO-like, optional
buffer to write to
columns : sequence, optional
the subset of columns to write; default None writes all columns
col_space : int, optional
the minimum width of each column
header : bool, optional
whether to print column labels, default True
index : bool, optional
whether to print index (row) labels, default True
na_rep : string, optional
string representation of NAN to use, default 'NaN'
formatters : list or dict of one-parameter functions, optional
formatter functions to apply to columns' elements by position or name,
default None. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional
formatter function to apply to columns' elements if they are floats,
default None. The result of this function must be a unicode string.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every
multiindex key at each row, default True
index_names : bool, optional
Prints the names of the indexes, default True"""
justify_docstring = """
justify : {'left', 'right'}, default None
Left or right-justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box."""
return_docstring = """
Returns
-------
formatted : string (or unicode, depending on data and options)"""
docstring_to_string = common_docstring + justify_docstring + return_docstring
class CategoricalFormatter(object):
def __init__(self, categorical, buf=None, length=True,
na_rep='NaN', footer=True):
self.categorical = categorical
self.buf = buf if buf is not None else StringIO(u(""))
self.na_rep = na_rep
self.length = length
self.footer = footer
def _get_footer(self):
footer = ''
if self.length:
if footer:
footer += ', '
footer += "Length: %d" % len(self.categorical)
level_info = self.categorical._repr_categories_info()
# Levels are added in a newline
if footer:
footer += '\n'
footer += level_info
return compat.text_type(footer)
def _get_formatted_values(self):
return format_array(self.categorical.get_values(), None,
float_format=None,
na_rep=self.na_rep)
def to_string(self):
categorical = self.categorical
if len(categorical) == 0:
if self.footer:
return self._get_footer()
else:
return u('')
fmt_values = self._get_formatted_values()
result = ['%s' % i for i in fmt_values]
result = [i.strip() for i in result]
result = u(', ').join(result)
result = [u('[')+result+u(']')]
if self.footer:
footer = self._get_footer()
if footer:
result.append(footer)
return compat.text_type(u('\n').join(result))
class SeriesFormatter(object):
def __init__(self, series, buf=None, length=True, header=True,
na_rep='NaN', name=False, float_format=None, dtype=True,
max_rows=None):
self.series = series
self.buf = buf if buf is not None else StringIO()
self.name = name
self.na_rep = na_rep
self.header = header
self.length = length
self.max_rows = max_rows
if float_format is None:
float_format = get_option("display.float_format")
self.float_format = float_format
self.dtype = dtype
self.adj = _get_adjustment()
self._chk_truncate()
def _chk_truncate(self):
from pandas.tools.merge import concat
max_rows = self.max_rows
truncate_v = max_rows and (len(self.series) > max_rows)
series = self.series
if truncate_v:
if max_rows == 1:
row_num = max_rows
series = series.iloc[:max_rows]
else:
row_num = max_rows // 2
series = concat((series.iloc[:row_num], series.iloc[-row_num:]))
self.tr_row_num = row_num
self.tr_series = series
self.truncate_v = truncate_v
def _get_footer(self):
name = self.series.name
footer = u('')
if getattr(self.series.index, 'freq', None) is not None:
footer += 'Freq: %s' % self.series.index.freqstr
if self.name is not False and name is not None:
if footer:
footer += ', '
series_name = com.pprint_thing(name,
escape_chars=('\t', '\r', '\n'))
footer += ("Name: %s" %
series_name) if name is not None else ""
if self.length:
if footer:
footer += ', '
footer += 'Length: %d' % len(self.series)
if self.dtype is not False and self.dtype is not None:
name = getattr(self.tr_series.dtype, 'name', None)
if name:
if footer:
footer += ', '
footer += 'dtype: %s' % com.pprint_thing(name)
# level infos are added to the end and in a new line, like it is done for Categoricals
if com.is_categorical_dtype(self.tr_series.dtype):
level_info = self.tr_series._values._repr_categories_info()
if footer:
footer += "\n"
footer += level_info
return compat.text_type(footer)
def _get_formatted_index(self):
index = self.tr_series.index
is_multi = isinstance(index, MultiIndex)
if is_multi:
have_header = any(name for name in index.names)
fmt_index = index.format(names=True)
else:
have_header = index.name is not None
fmt_index = index.format(name=True)
return fmt_index, have_header
def _get_formatted_values(self):
return format_array(self.tr_series._values, None,
float_format=self.float_format,
na_rep=self.na_rep)
def to_string(self):
series = self.tr_series
footer = self._get_footer()
if len(series) == 0:
return 'Series([], ' + footer + ')'
fmt_index, have_header = self._get_formatted_index()
fmt_values = self._get_formatted_values()
maxlen = max(self.adj.len(x) for x in fmt_index) # max index len
pad_space = min(maxlen, 60)
if self.truncate_v:
n_header_rows = 0
row_num = self.tr_row_num
width = self.adj.len(fmt_values[row_num-1])
if width > 3:
dot_str = '...'
else:
dot_str = '..'
# Series uses mode=center because it has single value columns
# DataFrame uses mode=left
dot_str = self.adj.justify([dot_str], width, mode='center')[0]
fmt_values.insert(row_num + n_header_rows, dot_str)
fmt_index.insert(row_num + 1, '')
result = self.adj.adjoin(3, *[fmt_index[1:], fmt_values])
if self.header and have_header:
result = fmt_index[0] + '\n' + result
if footer:
result += '\n' + footer
return compat.text_type(u('').join(result))
class TextAdjustment(object):
def __init__(self):
self.encoding = get_option("display.encoding")
def len(self, text):
return compat.strlen(text, encoding=self.encoding)
def justify(self, texts, max_len, mode='right'):
return com._justify(texts, max_len, mode=mode)
def adjoin(self, space, *lists, **kwargs):
return com.adjoin(space, *lists, strlen=self.len,
justfunc=self.justify, **kwargs)
class EastAsianTextAdjustment(TextAdjustment):
def __init__(self):
super(EastAsianTextAdjustment, self).__init__()
if get_option("display.unicode.ambiguous_as_wide"):
self.ambiguous_width = 2
else:
self.ambiguous_width = 1
def len(self, text):
return compat.east_asian_len(text, encoding=self.encoding,
ambiguous_width=self.ambiguous_width)
def justify(self, texts, max_len, mode='right'):
# re-calculate padding space per str considering East Asian Width
def _get_pad(t):
return max_len - self.len(t) + len(t)
if mode == 'left':
return [x.ljust(_get_pad(x)) for x in texts]
elif mode == 'center':
return [x.center(_get_pad(x)) for x in texts]
else:
return [x.rjust(_get_pad(x)) for x in texts]
def _get_adjustment():
use_east_asian_width = get_option("display.unicode.east_asian_width")
if use_east_asian_width:
return EastAsianTextAdjustment()
else:
return TextAdjustment()
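# Illustrative sketch (not part of the original source): the East Asian
# variant counts wide characters as two display cells, e.g. (default options)
# >>> EastAsianTextAdjustment().len(u'パンダ')
# 6
# >>> TextAdjustment().len(u'panda')
# 5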
class TableFormatter(object):
is_truncated = False
show_dimensions = None
@property
def should_show_dimensions(self):
return self.show_dimensions is True or (self.show_dimensions == 'truncate' and
self.is_truncated)
def _get_formatter(self, i):
if isinstance(self.formatters, (list, tuple)):
if com.is_integer(i):
return self.formatters[i]
else:
return None
else:
if com.is_integer(i) and i not in self.columns:
i = self.columns[i]
return self.formatters.get(i, None)
class DataFrameFormatter(TableFormatter):
"""
Render a DataFrame
self.to_string() : console-friendly tabular output
self.to_html() : html table
self.to_latex() : LaTeX tabular environment table
"""
__doc__ = __doc__ if __doc__ else ''
__doc__ += common_docstring + justify_docstring + return_docstring
def __init__(self, frame, buf=None, columns=None, col_space=None,
header=True, index=True, na_rep='NaN', formatters=None,
justify=None, float_format=None, sparsify=None,
index_names=True, line_width=None, max_rows=None,
max_cols=None, show_dimensions=False, **kwds):
self.frame = frame
self.buf = _expand_user(buf) if buf is not None else StringIO()
self.show_index_names = index_names
if sparsify is None:
sparsify = get_option("display.multi_sparse")
self.sparsify = sparsify
self.float_format = float_format
self.formatters = formatters if formatters is not None else {}
self.na_rep = na_rep
self.col_space = col_space
self.header = header
self.index = index
self.line_width = line_width
self.max_rows = max_rows
self.max_cols = max_cols
self.max_rows_displayed = min(max_rows or len(self.frame),
len(self.frame))
self.show_dimensions = show_dimensions
if justify is None:
self.justify = get_option("display.colheader_justify")
else:
self.justify = justify
self.kwds = kwds
if columns is not None:
self.columns = _ensure_index(columns)
self.frame = self.frame[self.columns]
else:
self.columns = frame.columns
self._chk_truncate()
self.adj = _get_adjustment()
def _chk_truncate(self):
'''
Checks whether the frame should be truncated. If so, slices
the frame up.
'''
from pandas.tools.merge import concat
# Column of which first element is used to determine width of a dot col
self.tr_size_col = -1
# Cut the data to the information actually printed
max_cols = self.max_cols
max_rows = self.max_rows
if max_cols == 0 or max_rows == 0: # assume we are in the terminal (why else = 0)
(w, h) = get_terminal_size()
self.w = w
self.h = h
if self.max_rows == 0:
dot_row = 1
prompt_row = 1
if self.show_dimensions:
show_dimension_rows = 3
n_add_rows = self.header + dot_row + show_dimension_rows + prompt_row
max_rows_adj = self.h - n_add_rows # rows available to fill with actual data
self.max_rows_adj = max_rows_adj
# Format only rows and columns that could potentially fit the screen
if max_cols == 0 and len(self.frame.columns) > w:
max_cols = w
if max_rows == 0 and len(self.frame) > h:
max_rows = h
if not hasattr(self, 'max_rows_adj'):
self.max_rows_adj = max_rows
if not hasattr(self, 'max_cols_adj'):
self.max_cols_adj = max_cols
max_cols_adj = self.max_cols_adj
max_rows_adj = self.max_rows_adj
truncate_h = max_cols_adj and (len(self.columns) > max_cols_adj)
truncate_v = max_rows_adj and (len(self.frame) > max_rows_adj)
frame = self.frame
if truncate_h:
if max_cols_adj == 0:
col_num = len(frame.columns)
elif max_cols_adj == 1:
frame = frame.iloc[:, :max_cols]
col_num = max_cols
else:
col_num = (max_cols_adj // 2)
frame = concat((frame.iloc[:, :col_num], frame.iloc[:, -col_num:]), axis=1)
self.tr_col_num = col_num
if truncate_v:
if max_rows_adj == 0:
row_num = len(frame)
if max_rows_adj == 1:
row_num = max_rows
frame = frame.iloc[:max_rows, :]
else:
row_num = max_rows_adj // 2
frame = concat((frame.iloc[:row_num, :], frame.iloc[-row_num:, :]))
self.tr_row_num = row_num
self.tr_frame = frame
self.truncate_h = truncate_h
self.truncate_v = truncate_v
self.is_truncated = self.truncate_h or self.truncate_v
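    # Illustrative note (not part of the original source): with, say, a frame
    # of 100 rows and max_rows=10, _chk_truncate keeps the first 5 and the
    # last 5 rows in self.tr_frame; _to_str_columns later inserts a '...' row
    # between the two halves.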
def _to_str_columns(self):
"""
Render a DataFrame to a list of columns (as lists of strings).
"""
frame = self.tr_frame
# may include levels names also
str_index = self._get_formatted_index(frame)
str_columns = self._get_formatted_column_labels(frame)
if self.header:
stringified = []
for i, c in enumerate(frame):
cheader = str_columns[i]
max_colwidth = max(self.col_space or 0,
*(self.adj.len(x) for x in cheader))
fmt_values = self._format_col(i)
fmt_values = _make_fixed_width(fmt_values, self.justify,
minimum=max_colwidth,
adj=self.adj)
max_len = max(np.max([self.adj.len(x) for x in fmt_values]),
max_colwidth)
cheader = self.adj.justify(cheader, max_len, mode=self.justify)
stringified.append(cheader + fmt_values)
else:
stringified = []
for i, c in enumerate(frame):
fmt_values = self._format_col(i)
fmt_values = _make_fixed_width(fmt_values, self.justify,
minimum=(self.col_space or 0),
adj=self.adj)
stringified.append(fmt_values)
strcols = stringified
if self.index:
strcols.insert(0, str_index)
# Add ... to signal truncated
truncate_h = self.truncate_h
truncate_v = self.truncate_v
if truncate_h:
col_num = self.tr_col_num
col_width = self.adj.len(strcols[self.tr_size_col][0]) # infer from column header
strcols.insert(self.tr_col_num + 1, ['...'.center(col_width)] * (len(str_index)))
if truncate_v:
n_header_rows = len(str_index) - len(frame)
row_num = self.tr_row_num
for ix, col in enumerate(strcols):
cwidth = self.adj.len(strcols[ix][row_num]) # infer from above row
is_dot_col = False
if truncate_h:
is_dot_col = ix == col_num + 1
if cwidth > 3 or is_dot_col:
my_str = '...'
else:
my_str = '..'
if ix == 0:
dot_mode = 'left'
elif is_dot_col:
cwidth = self.adj.len(strcols[self.tr_size_col][0])
dot_mode = 'center'
else:
dot_mode = 'right'
dot_str = self.adj.justify([my_str], cwidth, mode=dot_mode)[0]
strcols[ix].insert(row_num + n_header_rows, dot_str)
return strcols
def to_string(self):
"""
Render a DataFrame to a console-friendly tabular output.
"""
from pandas import Series
frame = self.frame
if len(frame.columns) == 0 or len(frame.index) == 0:
info_line = (u('Empty %s\nColumns: %s\nIndex: %s')
% (type(self.frame).__name__,
com.pprint_thing(frame.columns),
com.pprint_thing(frame.index)))
text = info_line
else:
strcols = self._to_str_columns()
if self.line_width is None: # no need to wrap around just print the whole frame
text = self.adj.adjoin(1, *strcols)
elif not isinstance(self.max_cols, int) or self.max_cols > 0: # need to wrap around
text = self._join_multiline(*strcols)
else: # max_cols == 0. Try to fit frame to terminal
text = self.adj.adjoin(1, *strcols).split('\n')
row_lens = Series(text).apply(len)
max_len_col_ix = np.argmax(row_lens)
max_len = row_lens[max_len_col_ix]
headers = [ele[0] for ele in strcols]
# Size of last col determines dot col size. See `self._to_str_columns
size_tr_col = len(headers[self.tr_size_col])
max_len += size_tr_col # Need to make space for largest row plus truncate dot col
dif = max_len - self.w
adj_dif = dif
col_lens = Series([Series(ele).apply(len).max() for ele in strcols])
n_cols = len(col_lens)
counter = 0
while adj_dif > 0 and n_cols > 1:
counter += 1
mid = int(round(n_cols / 2.))
mid_ix = col_lens.index[mid]
col_len = col_lens[mid_ix]
adj_dif -= (col_len + 1) # adjoin adds one
col_lens = col_lens.drop(mid_ix)
n_cols = len(col_lens)
max_cols_adj = n_cols - self.index # subtract index column
self.max_cols_adj = max_cols_adj
# Call again _chk_truncate to cut frame appropriately
# and then generate string representation
self._chk_truncate()
strcols = self._to_str_columns()
text = self.adj.adjoin(1, *strcols)
self.buf.writelines(text)
if self.should_show_dimensions:
self.buf.write("\n\n[%d rows x %d columns]"
% (len(frame), len(frame.columns)))
def _join_multiline(self, *strcols):
lwidth = self.line_width
adjoin_width = 1
strcols = list(strcols)
if self.index:
idx = strcols.pop(0)
lwidth -= np.array([self.adj.len(x) for x in idx]).max() + adjoin_width
col_widths = [np.array([self.adj.len(x) for x in col]).max()
if len(col) > 0 else 0
for col in strcols]
col_bins = _binify(col_widths, lwidth)
nbins = len(col_bins)
if self.truncate_v:
nrows = self.max_rows_adj + 1
else:
nrows = len(self.frame)
str_lst = []
st = 0
for i, ed in enumerate(col_bins):
row = strcols[st:ed]
row.insert(0, idx)
if nbins > 1:
if ed <= len(strcols) and i < nbins - 1:
row.append([' \\'] + [' '] * (nrows - 1))
else:
row.append([' '] * nrows)
str_lst.append(self.adj.adjoin(adjoin_width, *row))
st = ed
return '\n\n'.join(str_lst)
def to_latex(self, column_format=None, longtable=False):
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
self.escape = self.kwds.get('escape', True)
def get_col_type(dtype):
if issubclass(dtype.type, np.number):
return 'r'
else:
return 'l'
frame = self.frame
if len(frame.columns) == 0 or len(frame.index) == 0:
info_line = (u('Empty %s\nColumns: %s\nIndex: %s')
% (type(self.frame).__name__,
frame.columns, frame.index))
strcols = [[info_line]]
else:
strcols = self._to_str_columns()
if self.index and isinstance(self.frame.index, MultiIndex):
clevels = self.frame.columns.nlevels
strcols.pop(0)
name = any(self.frame.index.names)
for i, lev in enumerate(self.frame.index.levels):
lev2 = lev.format()
blank = ' ' * len(lev2[0])
lev3 = [blank] * clevels
if name:
lev3.append(lev.name)
for level_idx, group in itertools.groupby(
self.frame.index.labels[i]):
count = len(list(group))
lev3.extend([lev2[level_idx]] + [blank] * (count - 1))
strcols.insert(i, lev3)
if column_format is None:
dtypes = self.frame.dtypes._values
column_format = ''.join(map(get_col_type, dtypes))
if self.index:
index_format = 'l' * self.frame.index.nlevels
column_format = index_format + column_format
elif not isinstance(column_format,
compat.string_types): # pragma: no cover
raise AssertionError('column_format must be str or unicode, not %s'
% type(column_format))
def write(buf, frame, column_format, strcols, longtable=False):
if not longtable:
buf.write('\\begin{tabular}{%s}\n' % column_format)
buf.write('\\toprule\n')
else:
buf.write('\\begin{longtable}{%s}\n' % column_format)
buf.write('\\toprule\n')
nlevels = frame.columns.nlevels
if any(frame.index.names):
nlevels += 1
for i, row in enumerate(zip(*strcols)):
if i == nlevels and self.header:
buf.write('\\midrule\n') # End of header
if longtable:
buf.write('\\endhead\n')
buf.write('\\midrule\n')
buf.write('\\multicolumn{3}{r}{{Continued on next '
'page}} \\\\\n')
                        buf.write('\\midrule\n')
                        buf.write('\\endfoot\n\n')
buf.write('\\bottomrule\n')
buf.write('\\endlastfoot\n')
if self.escape:
crow = [(x.replace('\\', '\\textbackslash') # escape backslashes first
.replace('_', '\\_')
.replace('%', '\\%')
.replace('$', '\\$')
.replace('#', '\\#')
.replace('{', '\\{')
.replace('}', '\\}')
.replace('~', '\\textasciitilde')
.replace('^', '\\textasciicircum')
.replace('&', '\\&') if x else '{}') for x in row]
else:
crow = [x if x else '{}' for x in row]
buf.write(' & '.join(crow))
buf.write(' \\\\\n')
if not longtable:
buf.write('\\bottomrule\n')
buf.write('\\end{tabular}\n')
else:
buf.write('\\end{longtable}\n')
if hasattr(self.buf, 'write'):
write(self.buf, frame, column_format, strcols, longtable)
elif isinstance(self.buf, compat.string_types):
with open(self.buf, 'w') as f:
write(f, frame, column_format, strcols, longtable)
else:
raise TypeError('buf is not a file name and it has no write '
'method')
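    # Illustrative sketch (not part of the original source): this path is
    # normally reached through DataFrame.to_latex(), e.g.
    # >>> pd.DataFrame({'a': [1, 2]}).to_latex()
    # returns a string starting with '\begin{tabular}{lr}' and using the
    # toprule/midrule/bottomrule layout written above.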
def _format_col(self, i):
frame = self.tr_frame
formatter = self._get_formatter(i)
return format_array(
frame.iloc[:, i]._values,
formatter, float_format=self.float_format, na_rep=self.na_rep,
space=self.col_space
)
def to_html(self, classes=None, notebook=False):
"""
Render a DataFrame to a html table.
Parameters
----------
notebook : {True, False}, optional, default False
Whether the generated HTML is for IPython Notebook.
"""
html_renderer = HTMLFormatter(self, classes=classes,
max_rows=self.max_rows,
max_cols=self.max_cols,
notebook=notebook)
if hasattr(self.buf, 'write'):
html_renderer.write_result(self.buf)
elif isinstance(self.buf, compat.string_types):
with open(self.buf, 'w') as f:
html_renderer.write_result(f)
else:
raise TypeError('buf is not a file name and it has no write '
' method')
def _get_formatted_column_labels(self, frame):
from pandas.core.index import _sparsify
def is_numeric_dtype(dtype):
return issubclass(dtype.type, np.number)
columns = frame.columns
if isinstance(columns, MultiIndex):
fmt_columns = columns.format(sparsify=False, adjoin=False)
fmt_columns = lzip(*fmt_columns)
dtypes = self.frame.dtypes._values
# if we have a Float level, they don't use leading space at all
restrict_formatting = any([l.is_floating for l in columns.levels])
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
def space_format(x, y):
if y not in self.formatters and need_leadsp[x] and not restrict_formatting:
return ' ' + y
return y
str_columns = list(zip(*[[space_format(x, y) for y in x] for x in fmt_columns]))
if self.sparsify:
str_columns = _sparsify(str_columns)
str_columns = [list(x) for x in zip(*str_columns)]
else:
fmt_columns = columns.format()
dtypes = self.frame.dtypes
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
str_columns = [[' ' + x
if not self._get_formatter(i) and need_leadsp[x]
else x]
for i, (col, x) in
enumerate(zip(columns, fmt_columns))]
if self.show_index_names and self.has_index_names:
for x in str_columns:
x.append('')
# self.str_columns = str_columns
return str_columns
@property
def has_index_names(self):
return _has_names(self.frame.index)
@property
def has_column_names(self):
return _has_names(self.frame.columns)
def _get_formatted_index(self, frame):
# Note: this is only used by to_string() and to_latex(), not by to_html().
index = frame.index
columns = frame.columns
show_index_names = self.show_index_names and self.has_index_names
show_col_names = (self.show_index_names and self.has_column_names)
fmt = self._get_formatter('__index__')
if isinstance(index, MultiIndex):
fmt_index = index.format(sparsify=self.sparsify, adjoin=False,
names=show_index_names,
formatter=fmt)
else:
fmt_index = [index.format(name=show_index_names, formatter=fmt)]
fmt_index = [tuple(_make_fixed_width(list(x), justify='left',
minimum=(self.col_space or 0),
adj=self.adj))
for x in fmt_index]
adjoined = self.adj.adjoin(1, *fmt_index).split('\n')
# empty space for columns
if show_col_names:
col_header = ['%s' % x for x in self._get_column_name_list()]
else:
col_header = [''] * columns.nlevels
if self.header:
return col_header + adjoined
else:
return adjoined
def _get_column_name_list(self):
names = []
columns = self.frame.columns
if isinstance(columns, MultiIndex):
names.extend('' if name is None else name
for name in columns.names)
else:
names.append('' if columns.name is None else columns.name)
return names
class HTMLFormatter(TableFormatter):
indent_delta = 2
def __init__(self, formatter, classes=None, max_rows=None, max_cols=None,
notebook=False):
self.fmt = formatter
self.classes = classes
self.frame = self.fmt.frame
self.columns = self.fmt.tr_frame.columns
self.elements = []
self.bold_rows = self.fmt.kwds.get('bold_rows', False)
self.escape = self.fmt.kwds.get('escape', True)
self.max_rows = max_rows or len(self.fmt.frame)
self.max_cols = max_cols or len(self.fmt.columns)
self.show_dimensions = self.fmt.show_dimensions
self.is_truncated = (self.max_rows < len(self.fmt.frame) or
self.max_cols < len(self.fmt.columns))
self.notebook = notebook
def write(self, s, indent=0):
rs = com.pprint_thing(s)
self.elements.append(' ' * indent + rs)
def write_th(self, s, indent=0, tags=None):
if (self.fmt.col_space is not None
and self.fmt.col_space > 0):
tags = (tags or "")
tags += 'style="min-width: %s;"' % self.fmt.col_space
return self._write_cell(s, kind='th', indent=indent, tags=tags)
def write_td(self, s, indent=0, tags=None):
return self._write_cell(s, kind='td', indent=indent, tags=tags)
def _write_cell(self, s, kind='td', indent=0, tags=None):
if tags is not None:
start_tag = '<%s %s>' % (kind, tags)
else:
start_tag = '<%s>' % kind
if self.escape:
# escape & first to prevent double escaping of &
esc = OrderedDict(
                [('&', r'&amp;'), ('<', r'&lt;'), ('>', r'&gt;')]
)
else:
esc = {}
rs = com.pprint_thing(s, escape_chars=esc).strip()
self.write(
'%s%s</%s>' % (start_tag, rs, kind), indent)
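    # Illustrative example: with escaping enabled, a call such as
    #   self._write_cell('a<b', kind='td', indent=4)
    # appends the string '    <td>a&lt;b</td>' to self.elements.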
def write_tr(self, line, indent=0, indent_delta=4, header=False,
align=None, tags=None, nindex_levels=0):
if tags is None:
tags = {}
if align is None:
self.write('<tr>', indent)
else:
self.write('<tr style="text-align: %s;">' % align, indent)
indent += indent_delta
for i, s in enumerate(line):
val_tag = tags.get(i, None)
if header or (self.bold_rows and i < nindex_levels):
self.write_th(s, indent, tags=val_tag)
else:
self.write_td(s, indent, tags=val_tag)
indent -= indent_delta
self.write('</tr>', indent)
def write_result(self, buf):
indent = 0
frame = self.frame
_classes = ['dataframe'] # Default class.
if self.classes is not None:
if isinstance(self.classes, str):
self.classes = self.classes.split()
if not isinstance(self.classes, (list, tuple)):
raise AssertionError(('classes must be list or tuple, '
'not %s') % type(self.classes))
_classes.extend(self.classes)
if self.notebook:
div_style = ''
try:
import IPython
if IPython.__version__ < LooseVersion('3.0.0'):
div_style = ' style="max-width:1500px;overflow:auto;"'
except ImportError:
pass
self.write('<div{0}>'.format(div_style))
self.write('<table border="1" class="%s">' % ' '.join(_classes),
indent)
indent += self.indent_delta
indent = self._write_header(indent)
indent = self._write_body(indent)
self.write('</table>', indent)
if self.should_show_dimensions:
by = chr(215) if compat.PY3 else unichr(215) # ×
self.write(u('<p>%d rows %s %d columns</p>') %
(len(frame), by, len(frame.columns)))
if self.notebook:
self.write('</div>')
_put_lines(buf, self.elements)
def _write_header(self, indent):
truncate_h = self.fmt.truncate_h
row_levels = self.frame.index.nlevels
if not self.fmt.header:
# write nothing
return indent
def _column_header():
if self.fmt.index:
row = [''] * (self.frame.index.nlevels - 1)
else:
row = []
if isinstance(self.columns, MultiIndex):
if self.fmt.has_column_names and self.fmt.index:
row.append(single_column_table(self.columns.names))
else:
row.append('')
style = "text-align: %s;" % self.fmt.justify
row.extend([single_column_table(c, self.fmt.justify, style) for
c in self.columns])
else:
if self.fmt.index:
row.append(self.columns.name or '')
row.extend(self.columns)
return row
self.write('<thead>', indent)
row = []
indent += self.indent_delta
if isinstance(self.columns, MultiIndex):
template = 'colspan="%d" halign="left"'
if self.fmt.sparsify:
# GH3547
sentinel = com.sentinel_factory()
else:
sentinel = None
levels = self.columns.format(sparsify=sentinel,
adjoin=False, names=False)
level_lengths = _get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
for lnum, (records, values) in enumerate(zip(level_lengths,
levels)):
if truncate_h:
# modify the header lines
ins_col = self.fmt.tr_col_num
if self.fmt.sparsify:
recs_new = {}
# Increment tags after ... col.
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
elif tag + span > ins_col:
recs_new[tag] = span + 1
if lnum == inner_lvl:
values = values[:ins_col] + (u('...'),) + \
values[ins_col:]
else: # sparse col headers do not receive a ...
values = (values[:ins_col] + (values[ins_col - 1],) +
values[ins_col:])
else:
recs_new[tag] = span
# if ins_col lies between tags, all col headers get ...
if tag + span == ins_col:
recs_new[ins_col] = 1
values = values[:ins_col] + (u('...'),) + \
values[ins_col:]
records = recs_new
inner_lvl = len(level_lengths) - 1
if lnum == inner_lvl:
records[ins_col] = 1
else:
recs_new = {}
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
else:
recs_new[tag] = span
recs_new[ins_col] = 1
records = recs_new
values = values[:ins_col] + [u('...')] + values[ins_col:]
name = self.columns.names[lnum]
row = [''] * (row_levels - 1) + ['' if name is None
else com.pprint_thing(name)]
if row == [""] and self.fmt.index is False:
row = []
tags = {}
j = len(row)
for i, v in enumerate(values):
if i in records:
if records[i] > 1:
tags[j] = template % records[i]
else:
continue
j += 1
row.append(v)
self.write_tr(row, indent, self.indent_delta, tags=tags,
header=True)
else:
col_row = _column_header()
align = self.fmt.justify
if truncate_h:
ins_col = row_levels + self.fmt.tr_col_num
col_row.insert(ins_col, '...')
self.write_tr(col_row, indent, self.indent_delta, header=True,
align=align)
if self.fmt.has_index_names and self.fmt.index:
row = [
x if x is not None else '' for x in self.frame.index.names
] + [''] * min(len(self.columns), self.max_cols)
if truncate_h:
ins_col = row_levels + self.fmt.tr_col_num
row.insert(ins_col, '')
self.write_tr(row, indent, self.indent_delta, header=True)
indent -= self.indent_delta
self.write('</thead>', indent)
return indent
def _write_body(self, indent):
self.write('<tbody>', indent)
indent += self.indent_delta
fmt_values = {}
for i in range(min(len(self.columns), self.max_cols)):
fmt_values[i] = self.fmt._format_col(i)
# write values
if self.fmt.index:
if isinstance(self.frame.index, MultiIndex):
self._write_hierarchical_rows(fmt_values, indent)
else:
self._write_regular_rows(fmt_values, indent)
else:
for i in range(len(self.frame)):
row = [fmt_values[j][i] for j in range(len(self.columns))]
self.write_tr(row, indent, self.indent_delta, tags=None)
indent -= self.indent_delta
self.write('</tbody>', indent)
indent -= self.indent_delta
return indent
def _write_regular_rows(self, fmt_values, indent):
truncate_h = self.fmt.truncate_h
truncate_v = self.fmt.truncate_v
ncols = len(self.fmt.tr_frame.columns)
nrows = len(self.fmt.tr_frame)
fmt = self.fmt._get_formatter('__index__')
if fmt is not None:
index_values = self.fmt.tr_frame.index.map(fmt)
else:
index_values = self.fmt.tr_frame.index.format()
row = []
for i in range(nrows):
if truncate_v and i == (self.fmt.tr_row_num):
str_sep_row = ['...' for ele in row]
self.write_tr(str_sep_row, indent, self.indent_delta, tags=None,
nindex_levels=1)
row = []
row.append(index_values[i])
row.extend(fmt_values[j][i] for j in range(ncols))
if truncate_h:
dot_col_ix = self.fmt.tr_col_num + 1
row.insert(dot_col_ix, '...')
self.write_tr(row, indent, self.indent_delta, tags=None,
nindex_levels=1)
def _write_hierarchical_rows(self, fmt_values, indent):
template = 'rowspan="%d" valign="top"'
truncate_h = self.fmt.truncate_h
truncate_v = self.fmt.truncate_v
frame = self.fmt.tr_frame
ncols = len(frame.columns)
nrows = len(frame)
row_levels = self.frame.index.nlevels
idx_values = frame.index.format(sparsify=False, adjoin=False, names=False)
idx_values = lzip(*idx_values)
if self.fmt.sparsify:
# GH3547
sentinel = com.sentinel_factory()
levels = frame.index.format(sparsify=sentinel, adjoin=False, names=False)
level_lengths = _get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
if truncate_v:
# Insert ... row and adjust idx_values and
# level_lengths to take this into account.
ins_row = self.fmt.tr_row_num
for lnum, records in enumerate(level_lengths):
rec_new = {}
for tag, span in list(records.items()):
if tag >= ins_row:
rec_new[tag + 1] = span
elif tag + span > ins_row:
rec_new[tag] = span + 1
dot_row = list(idx_values[ins_row - 1])
dot_row[-1] = u('...')
idx_values.insert(ins_row, tuple(dot_row))
else:
rec_new[tag] = span
                        # If ins_row lies between tags, all index columns receive ...
if tag + span == ins_row:
rec_new[ins_row] = 1
if lnum == 0:
idx_values.insert(ins_row, tuple([u('...')]*len(level_lengths)))
level_lengths[lnum] = rec_new
level_lengths[inner_lvl][ins_row] = 1
for ix_col in range(len(fmt_values)):
fmt_values[ix_col].insert(ins_row, '...')
nrows += 1
for i in range(nrows):
row = []
tags = {}
sparse_offset = 0
j = 0
for records, v in zip(level_lengths, idx_values[i]):
if i in records:
if records[i] > 1:
tags[j] = template % records[i]
else:
sparse_offset += 1
continue
j += 1
row.append(v)
row.extend(fmt_values[j][i] for j in range(ncols))
if truncate_h:
row.insert(row_levels - sparse_offset + self.fmt.tr_col_num, '...')
self.write_tr(row, indent, self.indent_delta, tags=tags,
nindex_levels=len(levels) - sparse_offset)
else:
for i in range(len(frame)):
idx_values = list(zip(*frame.index.format(sparsify=False,
adjoin=False,
names=False)))
row = []
row.extend(idx_values[i])
row.extend(fmt_values[j][i] for j in range(ncols))
if truncate_h:
row.insert(row_levels + self.fmt.tr_col_num, '...')
self.write_tr(row, indent, self.indent_delta, tags=None,
nindex_levels=frame.index.nlevels)
def _get_level_lengths(levels, sentinel=''):
from itertools import groupby
def _make_grouper():
record = {'count': 0}
def grouper(x):
if x != sentinel:
record['count'] += 1
return record['count']
return grouper
result = []
for lev in levels:
i = 0
f = _make_grouper()
recs = {}
for key, gpr in groupby(lev, f):
values = list(gpr)
recs[i] = len(values)
i += len(values)
result.append(recs)
return result
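# Illustrative example: for sparsified level strings the returned dicts map
# the starting position of each run to its length, e.g.
#   _get_level_lengths([['a', '', '', 'b', ''], [1, 2, 3, 4, 5]])
#   -> [{0: 3, 3: 2}, {0: 1, 1: 1, 2: 1, 3: 1, 4: 1}]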
class CSVFormatter(object):
def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', float_format=None,
cols=None, header=True, index=True, index_label=None,
mode='w', nanRep=None, encoding=None, compression=None, quoting=None,
line_terminator='\n', chunksize=None, engine=None,
tupleize_cols=False, quotechar='"', date_format=None,
doublequote=True, escapechar=None, decimal='.'):
if engine is not None:
warnings.warn("'engine' keyword is deprecated and "
"will be removed in a future version",
FutureWarning, stacklevel=3)
self.engine = engine # remove for 0.18
self.obj = obj
if path_or_buf is None:
path_or_buf = StringIO()
self.path_or_buf = _expand_user(path_or_buf)
self.sep = sep
self.na_rep = na_rep
self.float_format = float_format
self.decimal = decimal
self.header = header
self.index = index
self.index_label = index_label
self.mode = mode
self.encoding = encoding
self.compression = compression
if quoting is None:
quoting = csv.QUOTE_MINIMAL
self.quoting = quoting
if quoting == csv.QUOTE_NONE:
# prevents crash in _csv
quotechar = None
self.quotechar = quotechar
self.doublequote = doublequote
self.escapechar = escapechar
self.line_terminator = line_terminator
self.date_format = date_format
# GH3457
if not self.obj.columns.is_unique and engine == 'python':
raise NotImplementedError("columns.is_unique == False not "
"supported with engine='python'")
self.tupleize_cols = tupleize_cols
self.has_mi_columns = isinstance(obj.columns, MultiIndex
) and not self.tupleize_cols
# validate mi options
if self.has_mi_columns:
if cols is not None:
raise TypeError("cannot specify cols with a MultiIndex on the "
"columns")
if cols is not None:
if isinstance(cols, Index):
cols = cols.to_native_types(na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting)
else:
cols = list(cols)
self.obj = self.obj.loc[:, cols]
# update columns to include possible multiplicity of dupes
        # and make sure cols is just a list of labels
cols = self.obj.columns
if isinstance(cols, Index):
cols = cols.to_native_types(na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting)
else:
cols = list(cols)
# save it
self.cols = cols
# preallocate data 2d list
self.blocks = self.obj._data.blocks
ncols = sum(b.shape[0] for b in self.blocks)
self.data = [None] * ncols
if chunksize is None:
chunksize = (100000 // (len(self.cols) or 1)) or 1
self.chunksize = int(chunksize)
self.data_index = obj.index
if isinstance(obj.index, PeriodIndex):
self.data_index = obj.index.to_timestamp()
if (isinstance(self.data_index, DatetimeIndex) and
date_format is not None):
self.data_index = Index([x.strftime(date_format)
if notnull(x) else ''
for x in self.data_index])
self.nlevels = getattr(self.data_index, 'nlevels', 1)
if not index:
self.nlevels = 0
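    # Illustrative usage sketch (normally reached via DataFrame.to_csv;
    # `df` and 'out.csv' below are hypothetical):
    #   CSVFormatter(df, 'out.csv', sep=',', index=False).save()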
    # original python implementation of df.to_csv
    # invoked by df.to_csv(engine='python')
def _helper_csv(self, writer, na_rep=None, cols=None,
header=True, index=True,
index_label=None, float_format=None, date_format=None):
if cols is None:
cols = self.columns
has_aliases = isinstance(header, (tuple, list, np.ndarray, Index))
if has_aliases or header:
if index:
# should write something for index label
if index_label is not False:
if index_label is None:
if isinstance(self.obj.index, MultiIndex):
index_label = []
for i, name in enumerate(self.obj.index.names):
if name is None:
name = ''
index_label.append(name)
else:
index_label = self.obj.index.name
if index_label is None:
index_label = ['']
else:
index_label = [index_label]
elif not isinstance(index_label,
(list, tuple, np.ndarray, Index)):
# given a string for a DF with Index
index_label = [index_label]
encoded_labels = list(index_label)
else:
encoded_labels = []
if has_aliases:
if len(header) != len(cols):
raise ValueError(('Writing %d cols but got %d aliases'
% (len(cols), len(header))))
else:
write_cols = header
else:
write_cols = cols
encoded_cols = list(write_cols)
writer.writerow(encoded_labels + encoded_cols)
else:
encoded_cols = list(cols)
writer.writerow(encoded_cols)
if date_format is None:
date_formatter = lambda x: Timestamp(x)._repr_base
else:
def strftime_with_nulls(x):
x = Timestamp(x)
if notnull(x):
return x.strftime(date_format)
date_formatter = lambda x: strftime_with_nulls(x)
data_index = self.obj.index
if isinstance(self.obj.index, PeriodIndex):
data_index = self.obj.index.to_timestamp()
if isinstance(data_index, DatetimeIndex) and date_format is not None:
data_index = Index([date_formatter(x) for x in data_index])
values = self.obj.copy()
values.index = data_index
values.columns = values.columns.to_native_types(
na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting)
values = values[cols]
series = {}
for k, v in compat.iteritems(values._series):
series[k] = v._values
nlevels = getattr(data_index, 'nlevels', 1)
for j, idx in enumerate(data_index):
row_fields = []
if index:
if nlevels == 1:
row_fields = [idx]
else: # handle MultiIndex
row_fields = list(idx)
for i, col in enumerate(cols):
val = series[col][j]
if lib.checknull(val):
val = na_rep
if float_format is not None and com.is_float(val):
val = float_format % val
elif isinstance(val, (np.datetime64, Timestamp)):
val = date_formatter(val)
row_fields.append(val)
writer.writerow(row_fields)
def save(self):
# create the writer & save
if hasattr(self.path_or_buf, 'write'):
f = self.path_or_buf
close = False
else:
f = _get_handle(self.path_or_buf, self.mode,
encoding=self.encoding,
compression=self.compression)
close = True
try:
writer_kwargs = dict(lineterminator=self.line_terminator,
delimiter=self.sep, quoting=self.quoting,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar)
if self.encoding is not None:
writer_kwargs['encoding'] = self.encoding
self.writer = UnicodeWriter(f, **writer_kwargs)
else:
self.writer = csv.writer(f, **writer_kwargs)
if self.engine == 'python':
# to be removed in 0.13
self._helper_csv(self.writer, na_rep=self.na_rep,
float_format=self.float_format,
cols=self.cols, header=self.header,
index=self.index,
index_label=self.index_label,
date_format=self.date_format)
else:
self._save()
finally:
if close:
f.close()
def _save_header(self):
writer = self.writer
obj = self.obj
index_label = self.index_label
cols = self.cols
has_mi_columns = self.has_mi_columns
header = self.header
encoded_labels = []
has_aliases = isinstance(header, (tuple, list, np.ndarray, Index))
if not (has_aliases or self.header):
return
if has_aliases:
if len(header) != len(cols):
raise ValueError(('Writing %d cols but got %d aliases'
% (len(cols), len(header))))
else:
write_cols = header
else:
write_cols = cols
if self.index:
# should write something for index label
if index_label is not False:
if index_label is None:
if isinstance(obj.index, MultiIndex):
index_label = []
for i, name in enumerate(obj.index.names):
if name is None:
name = ''
index_label.append(name)
else:
index_label = obj.index.name
if index_label is None:
index_label = ['']
else:
index_label = [index_label]
elif not isinstance(index_label, (list, tuple, np.ndarray, Index)):
# given a string for a DF with Index
index_label = [index_label]
encoded_labels = list(index_label)
else:
encoded_labels = []
if not has_mi_columns:
encoded_labels += list(write_cols)
# write out the mi
if has_mi_columns:
columns = obj.columns
# write out the names for each level, then ALL of the values for
# each level
for i in range(columns.nlevels):
# we need at least 1 index column to write our col names
col_line = []
if self.index:
# name is the first column
col_line.append(columns.names[i])
if isinstance(index_label, list) and len(index_label) > 1:
col_line.extend([''] * (len(index_label) - 1))
col_line.extend(columns.get_level_values(i))
writer.writerow(col_line)
# add blanks for the columns, so that we
# have consistent seps
encoded_labels.extend([''] * len(columns))
# write out the index label line
writer.writerow(encoded_labels)
def _save(self):
self._save_header()
nrows = len(self.data_index)
# write in chunksize bites
chunksize = self.chunksize
chunks = int(nrows / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self._save_chunk(start_i, end_i)
def _save_chunk(self, start_i, end_i):
data_index = self.data_index
# create the data for a chunk
slicer = slice(start_i, end_i)
for i in range(len(self.blocks)):
b = self.blocks[i]
d = b.to_native_types(slicer=slicer,
na_rep=self.na_rep,
float_format=self.float_format,
decimal=self.decimal,
date_format=self.date_format,
quoting=self.quoting)
for col_loc, col in zip(b.mgr_locs, d):
# self.data is a preallocated list
self.data[col_loc] = col
ix = data_index.to_native_types(slicer=slicer,
na_rep=self.na_rep,
float_format=self.float_format,
date_format=self.date_format,
quoting=self.quoting)
lib.write_csv_rows(self.data, ix, self.nlevels, self.cols, self.writer)
# from collections import namedtuple
# ExcelCell = namedtuple("ExcelCell",
# 'row, col, val, style, mergestart, mergeend')
class ExcelCell(object):
__fields__ = ('row', 'col', 'val', 'style', 'mergestart', 'mergeend')
__slots__ = __fields__
def __init__(self, row, col, val,
style=None, mergestart=None, mergeend=None):
self.row = row
self.col = col
self.val = val
self.style = style
self.mergestart = mergestart
self.mergeend = mergeend
header_style = {"font": {"bold": True},
"borders": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"alignment": {"horizontal": "center", "vertical": "top"}}
class ExcelFormatter(object):
"""
    Class for formatting a DataFrame into a list of ExcelCells.
Parameters
----------
df : dataframe
    na_rep : string, default ''
        Representation for missing (NA) values
float_format : string, default None
Format string for floating point numbers
cols : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : boolean, default True
output row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
merge_cells : boolean, default False
Format MultiIndex and Hierarchical Rows as merged cells.
inf_rep : string, default `'inf'`
representation for np.inf values (which aren't representable in Excel)
A `'-'` sign will be added in front of -inf.
"""
def __init__(self, df, na_rep='', float_format=None, cols=None,
header=True, index=True, index_label=None, merge_cells=False,
inf_rep='inf'):
self.rowcounter = 0
self.na_rep = na_rep
self.df = df
if cols is not None:
self.df = df.loc[:, cols]
self.columns = self.df.columns
self.float_format = float_format
self.index = index
self.index_label = index_label
self.header = header
self.merge_cells = merge_cells
self.inf_rep = inf_rep
def _format_value(self, val):
if lib.checknull(val):
val = self.na_rep
elif com.is_float(val):
if lib.isposinf_scalar(val):
val = self.inf_rep
elif lib.isneginf_scalar(val):
val = '-%s' % self.inf_rep
elif self.float_format is not None:
val = float(self.float_format % val)
return val
def _format_header_mi(self):
if self.columns.nlevels > 1:
if not self.index:
raise NotImplementedError("Writing to Excel with MultiIndex"
" columns and no index ('index'=False) "
"is not yet implemented.")
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if not(has_aliases or self.header):
return
columns = self.columns
level_strs = columns.format(sparsify=self.merge_cells, adjoin=False, names=False)
level_lengths = _get_level_lengths(level_strs)
coloffset = 0
lnum = 0
if self.index and isinstance(self.df.index, MultiIndex):
coloffset = len(self.df.index[0]) - 1
if self.merge_cells:
            # Format the multi-index column headers as merged cells.
for lnum in range(len(level_lengths)):
name = columns.names[lnum]
yield ExcelCell(lnum, coloffset, name, header_style)
for lnum, (spans, levels, labels) in enumerate(zip(level_lengths,
columns.levels,
columns.labels)
):
values = levels.take(labels)
for i in spans:
if spans[i] > 1:
yield ExcelCell(lnum,
coloffset + i + 1,
values[i],
header_style,
lnum,
coloffset + i + spans[i])
else:
yield ExcelCell(lnum,
coloffset + i + 1,
values[i],
header_style)
else:
# Format in legacy format with dots to indicate levels.
for i, values in enumerate(zip(*level_strs)):
v = ".".join(map(com.pprint_thing, values))
yield ExcelCell(lnum, coloffset + i + 1, v, header_style)
self.rowcounter = lnum
def _format_header_regular(self):
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if has_aliases or self.header:
coloffset = 0
if self.index:
coloffset = 1
if isinstance(self.df.index, MultiIndex):
coloffset = len(self.df.index[0])
colnames = self.columns
if has_aliases:
if len(self.header) != len(self.columns):
raise ValueError(('Writing %d cols but got %d aliases'
% (len(self.columns), len(self.header))))
else:
colnames = self.header
for colindex, colname in enumerate(colnames):
yield ExcelCell(self.rowcounter, colindex + coloffset, colname,
header_style)
def _format_header(self):
if isinstance(self.columns, MultiIndex):
gen = self._format_header_mi()
else:
gen = self._format_header_regular()
gen2 = ()
if self.df.index.names:
row = [x if x is not None else ''
for x in self.df.index.names] + [''] * len(self.columns)
if reduce(lambda x, y: x and y, map(lambda x: x != '', row)):
gen2 = (ExcelCell(self.rowcounter, colindex, val, header_style)
for colindex, val in enumerate(row))
self.rowcounter += 1
return itertools.chain(gen, gen2)
def _format_body(self):
if isinstance(self.df.index, MultiIndex):
return self._format_hierarchical_rows()
else:
return self._format_regular_rows()
def _format_regular_rows(self):
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if has_aliases or self.header:
self.rowcounter += 1
coloffset = 0
# output index and index_label?
if self.index:
            # check aliases
# if list only take first as this is not a MultiIndex
if self.index_label and isinstance(self.index_label,
(list, tuple, np.ndarray, Index)):
index_label = self.index_label[0]
# if string good to go
elif self.index_label and isinstance(self.index_label, str):
index_label = self.index_label
else:
index_label = self.df.index.names[0]
if isinstance(self.columns, MultiIndex):
self.rowcounter += 1
if index_label and self.header is not False:
yield ExcelCell(self.rowcounter - 1,
0,
index_label,
header_style)
# write index_values
index_values = self.df.index
if isinstance(self.df.index, PeriodIndex):
index_values = self.df.index.to_timestamp()
coloffset = 1
for idx, idxval in enumerate(index_values):
yield ExcelCell(self.rowcounter + idx, 0, idxval, header_style)
# Write the body of the frame data series by series.
for colidx in range(len(self.columns)):
series = self.df.iloc[:, colidx]
for i, val in enumerate(series):
yield ExcelCell(self.rowcounter + i, colidx + coloffset, val)
def _format_hierarchical_rows(self):
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if has_aliases or self.header:
self.rowcounter += 1
gcolidx = 0
if self.index:
index_labels = self.df.index.names
# check for aliases
if self.index_label and isinstance(self.index_label,
(list, tuple, np.ndarray, Index)):
index_labels = self.index_label
# MultiIndex columns require an extra row
# with index names (blank if None) for
            # unambiguous round-trip, unless not merging, in which case
            # the names all go on one row (Issue #11328)
if isinstance(self.columns, MultiIndex) and self.merge_cells:
self.rowcounter += 1
# if index labels are not empty go ahead and dump
if (any(x is not None for x in index_labels)
and self.header is not False):
for cidx, name in enumerate(index_labels):
yield ExcelCell(self.rowcounter - 1,
cidx,
name,
header_style)
if self.merge_cells:
# Format hierarchical rows as merged cells.
level_strs = self.df.index.format(sparsify=True, adjoin=False,
names=False)
level_lengths = _get_level_lengths(level_strs)
for spans, levels, labels in zip(level_lengths,
self.df.index.levels,
self.df.index.labels):
values = levels.take(labels)
for i in spans:
if spans[i] > 1:
yield ExcelCell(self.rowcounter + i,
gcolidx,
values[i],
header_style,
self.rowcounter + i + spans[i] - 1,
gcolidx)
else:
yield ExcelCell(self.rowcounter + i,
gcolidx,
values[i],
header_style)
gcolidx += 1
else:
# Format hierarchical rows with non-merged values.
for indexcolvals in zip(*self.df.index):
for idx, indexcolval in enumerate(indexcolvals):
yield ExcelCell(self.rowcounter + idx,
gcolidx,
indexcolval,
header_style)
gcolidx += 1
# Write the body of the frame data series by series.
for colidx in range(len(self.columns)):
series = self.df.iloc[:, colidx]
for i, val in enumerate(series):
yield ExcelCell(self.rowcounter + i, gcolidx + colidx, val)
def get_formatted_cells(self):
for cell in itertools.chain(self._format_header(),
self._format_body()):
cell.val = self._format_value(cell.val)
yield cell
# ----------------------------------------------------------------------
# Array formatters
def format_array(values, formatter, float_format=None, na_rep='NaN',
digits=None, space=None, justify='right'):
if com.is_categorical_dtype(values):
fmt_klass = CategoricalArrayFormatter
elif com.is_float_dtype(values.dtype):
fmt_klass = FloatArrayFormatter
elif com.is_period_arraylike(values):
fmt_klass = PeriodArrayFormatter
elif com.is_integer_dtype(values.dtype):
fmt_klass = IntArrayFormatter
elif com.is_datetimetz(values):
fmt_klass = Datetime64TZFormatter
elif com.is_datetime64_dtype(values.dtype):
fmt_klass = Datetime64Formatter
elif com.is_timedelta64_dtype(values.dtype):
fmt_klass = Timedelta64Formatter
else:
fmt_klass = GenericArrayFormatter
if space is None:
space = get_option("display.column_space")
if float_format is None:
float_format = get_option("display.float_format")
if digits is None:
digits = get_option("display.precision")
fmt_obj = fmt_klass(values, digits=digits, na_rep=na_rep,
float_format=float_format,
formatter=formatter, space=space,
justify=justify)
return fmt_obj.get_result()
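# Illustrative example: with default display options a float ndarray is
# dispatched to FloatArrayFormatter, e.g.
#   format_array(np.array([1.0, 2.5, np.nan]), None)
# returns right-justified strings along the lines of [' 1.0', ' 2.5', ' NaN']
# (exact widths depend on the active display options).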
class GenericArrayFormatter(object):
def __init__(self, values, digits=7, formatter=None, na_rep='NaN',
space=12, float_format=None, justify='right'):
self.values = values
self.digits = digits
self.na_rep = na_rep
self.space = space
self.formatter = formatter
self.float_format = float_format
self.justify = justify
def get_result(self):
fmt_values = self._format_strings()
return _make_fixed_width(fmt_values, self.justify)
def _format_strings(self):
if self.float_format is None:
float_format = get_option("display.float_format")
if float_format is None:
fmt_str = '%% .%dg' % get_option("display.precision")
float_format = lambda x: fmt_str % x
else:
float_format = self.float_format
formatter = self.formatter if self.formatter is not None else \
(lambda x: com.pprint_thing(x, escape_chars=('\t', '\r', '\n')))
def _format(x):
if self.na_rep is not None and lib.checknull(x):
if x is None:
return 'None'
elif x is pd.NaT:
return 'NaT'
return self.na_rep
elif isinstance(x, PandasObject):
return '%s' % x
else:
# object dtype
return '%s' % formatter(x)
vals = self.values
if isinstance(vals, Index):
vals = vals._values
is_float = lib.map_infer(vals, com.is_float) & notnull(vals)
leading_space = is_float.any()
fmt_values = []
for i, v in enumerate(vals):
if not is_float[i] and leading_space:
fmt_values.append(' %s' % _format(v))
elif is_float[i]:
fmt_values.append(float_format(v))
else:
fmt_values.append(' %s' % _format(v))
return fmt_values
class FloatArrayFormatter(GenericArrayFormatter):
"""
"""
def __init__(self, *args, **kwargs):
GenericArrayFormatter.__init__(self, *args, **kwargs)
if self.float_format is not None and self.formatter is None:
self.formatter = self.float_format
def _format_with(self, fmt_str):
def _val(x, threshold):
if notnull(x):
if (threshold is None or
abs(x) > get_option("display.chop_threshold")):
return fmt_str % x
else:
if fmt_str.endswith("e"): # engineering format
return "0"
else:
return fmt_str % 0
else:
return self.na_rep
threshold = get_option("display.chop_threshold")
fmt_values = [_val(x, threshold) for x in self.values]
return _trim_zeros(fmt_values, self.na_rep)
def _format_strings(self):
if self.formatter is not None:
fmt_values = [self.formatter(x) for x in self.values]
else:
fmt_str = '%% .%df' % self.digits
fmt_values = self._format_with(fmt_str)
if len(fmt_values) > 0:
maxlen = max(len(x) for x in fmt_values)
else:
maxlen = 0
too_long = maxlen > self.digits + 6
abs_vals = np.abs(self.values)
# this is pretty arbitrary for now
has_large_values = (abs_vals > 1e8).any()
has_small_values = ((abs_vals < 10 ** (-self.digits)) &
(abs_vals > 0)).any()
if too_long and has_large_values:
fmt_str = '%% .%de' % self.digits
fmt_values = self._format_with(fmt_str)
elif has_small_values:
fmt_str = '%% .%de' % self.digits
fmt_values = self._format_with(fmt_str)
return fmt_values
class IntArrayFormatter(GenericArrayFormatter):
def _format_strings(self):
formatter = self.formatter or (lambda x: '% d' % x)
fmt_values = [formatter(x) for x in self.values]
return fmt_values
class Datetime64Formatter(GenericArrayFormatter):
def __init__(self, values, nat_rep='NaT', date_format=None, **kwargs):
super(Datetime64Formatter, self).__init__(values, **kwargs)
self.nat_rep = nat_rep
self.date_format = date_format
def _format_strings(self):
""" we by definition have DO NOT have a TZ """
values = self.values
if not isinstance(values, DatetimeIndex):
values = DatetimeIndex(values)
fmt_values = format_array_from_datetime(values.asi8.ravel(),
format=_get_format_datetime64_from_values(values, self.date_format),
na_rep=self.nat_rep).reshape(values.shape)
return fmt_values.tolist()
class PeriodArrayFormatter(IntArrayFormatter):
def _format_strings(self):
values = PeriodIndex(self.values).to_native_types()
formatter = self.formatter or (lambda x: '%s' % x)
fmt_values = [formatter(x) for x in values]
return fmt_values
class CategoricalArrayFormatter(GenericArrayFormatter):
def __init__(self, values, *args, **kwargs):
GenericArrayFormatter.__init__(self, values, *args, **kwargs)
def _format_strings(self):
fmt_values = format_array(self.values.get_values(), self.formatter,
float_format=self.float_format,
na_rep=self.na_rep, digits=self.digits,
space=self.space, justify=self.justify)
return fmt_values
def _is_dates_only(values):
# return a boolean if we are only dates (and don't have a timezone)
values = DatetimeIndex(values)
if values.tz is not None:
return False
values_int = values.asi8
consider_values = values_int != iNaT
one_day_nanos = (86400 * 1e9)
even_days = np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0
if even_days:
return True
return False
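# Illustrative example: midnight-only, tz-naive values qualify while intraday
# values do not, e.g.
#   _is_dates_only(DatetimeIndex(['2000-01-01', '2000-01-02']))  -> True
#   _is_dates_only(DatetimeIndex(['2000-01-01 12:00']))          -> False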
def _format_datetime64(x, tz=None, nat_rep='NaT'):
if x is None or lib.checknull(x):
return nat_rep
if tz is not None or not isinstance(x, Timestamp):
x = Timestamp(x, tz=tz)
return str(x)
def _format_datetime64_dateonly(x, nat_rep='NaT', date_format=None):
if x is None or lib.checknull(x):
return nat_rep
if not isinstance(x, Timestamp):
x = Timestamp(x)
if date_format:
return x.strftime(date_format)
else:
return x._date_repr
def _get_format_datetime64(is_dates_only, nat_rep='NaT', date_format=None):
if is_dates_only:
return lambda x, tz=None: _format_datetime64_dateonly(x,
nat_rep=nat_rep,
date_format=date_format)
else:
return lambda x, tz=None: _format_datetime64(x, tz=tz, nat_rep=nat_rep)
def _get_format_datetime64_from_values(values, date_format):
""" given values and a date_format, return a string format """
is_dates_only = _is_dates_only(values)
if is_dates_only:
return date_format or "%Y-%m-%d"
return date_format
class Datetime64TZFormatter(Datetime64Formatter):
def _format_strings(self):
""" we by definition have a TZ """
values = self.values.asobject
is_dates_only = _is_dates_only(values)
formatter = (self.formatter or _get_format_datetime64(is_dates_only, date_format=self.date_format))
fmt_values = [ formatter(x) for x in values ]
return fmt_values
class Timedelta64Formatter(GenericArrayFormatter):
def __init__(self, values, nat_rep='NaT', box=False, **kwargs):
super(Timedelta64Formatter, self).__init__(values, **kwargs)
self.nat_rep = nat_rep
self.box = box
def _format_strings(self):
formatter = self.formatter or _get_format_timedelta64(self.values, nat_rep=self.nat_rep,
box=self.box)
fmt_values = np.array([formatter(x) for x in self.values])
return fmt_values
def _get_format_timedelta64(values, nat_rep='NaT', box=False):
"""
Return a formatter function for a range of timedeltas.
    All values are rendered with the same format.
    If box is True, the formatted value is wrapped in quotes.
"""
values_int = values.astype(np.int64)
consider_values = values_int != iNaT
one_day_nanos = (86400 * 1e9)
even_days = np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0
all_sub_day = np.logical_and(consider_values, np.abs(values_int) >= one_day_nanos).sum() == 0
if even_days:
format = 'even_day'
elif all_sub_day:
format = 'sub_day'
else:
format = 'long'
def _formatter(x):
if x is None or lib.checknull(x):
return nat_rep
if not isinstance(x, Timedelta):
x = Timedelta(x)
result = x._repr_base(format=format)
if box:
result = "'{0}'".format(result)
return result
return _formatter
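# Illustrative note: when every non-NaT value is a whole number of days the
# compact 'even_day' form is used (e.g. '2 days'); otherwise values fall back
# to a longer form such as '1 days 02:30:00'.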
def _make_fixed_width(strings, justify='right', minimum=None,
adj=None):
if len(strings) == 0 or justify == 'all':
return strings
if adj is None:
adj = _get_adjustment()
max_len = np.max([adj.len(x) for x in strings])
if minimum is not None:
max_len = max(minimum, max_len)
conf_max = get_option("display.max_colwidth")
if conf_max is not None and max_len > conf_max:
max_len = conf_max
def just(x):
if conf_max is not None:
if (conf_max > 3) & (adj.len(x) > max_len):
x = x[:max_len - 3] + '...'
return x
strings = [just(x) for x in strings]
result = adj.justify(strings, max_len, mode=justify)
return result
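# Illustrative example: strings are padded to the width of the longest entry,
# e.g. _make_fixed_width(['a', 'bbb'], justify='right') gives ['  a', 'bbb'].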
def _trim_zeros(str_floats, na_rep='NaN'):
"""
Trims zeros and decimal points.
"""
trimmed = str_floats
def _cond(values):
non_na = [x for x in values if x != na_rep]
return (len(non_na) > 0 and all([x.endswith('0') for x in non_na]) and
not(any([('e' in x) or ('E' in x) for x in non_na])))
while _cond(trimmed):
trimmed = [x[:-1] if x != na_rep else x for x in trimmed]
# trim decimal points
return [x[:-1] if x.endswith('.') and x != na_rep else x for x in trimmed]
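# Illustrative example: trailing zeros are stripped only while every non-NA
# value still ends in one, e.g.
#   _trim_zeros([' 1.500', ' 2.000', 'NaN'])  ->  [' 1.5', ' 2.0', 'NaN']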
def single_column_table(column, align=None, style=None):
table = '<table'
if align is not None:
table += (' align="%s"' % align)
if style is not None:
table += (' style="%s"' % style)
table += '><tbody>'
for i in column:
table += ('<tr><td>%s</td></tr>' % str(i))
table += '</tbody></table>'
return table
def single_row_table(row): # pragma: no cover
table = '<table><tbody><tr>'
for i in row:
table += ('<td>%s</td>' % str(i))
table += '</tr></tbody></table>'
return table
def _has_names(index):
if isinstance(index, MultiIndex):
return any([x is not None for x in index.names])
else:
return index.name is not None
# ------------------------------------------------------------------------------
# Global formatting options
_initial_defencoding = None
def detect_console_encoding():
"""
Try to find the most capable encoding supported by the console.
    Slightly modified from the way IPython handles the same issue.
"""
import locale
global _initial_defencoding
encoding = None
try:
encoding = sys.stdout.encoding or sys.stdin.encoding
except AttributeError:
pass
# try again for something better
if not encoding or 'ascii' in encoding.lower():
try:
encoding = locale.getpreferredencoding()
except Exception:
pass
# when all else fails. this will usually be "ascii"
if not encoding or 'ascii' in encoding.lower():
encoding = sys.getdefaultencoding()
# GH3360, save the reported defencoding at import time
# MPL backends may change it. Make available for debugging.
if not _initial_defencoding:
_initial_defencoding = sys.getdefaultencoding()
return encoding
def get_console_size():
"""Return console size as tuple = (width, height).
Returns (None,None) in non-interactive session.
"""
display_width = get_option('display.width')
# deprecated.
display_height = get_option('display.height', silent=True)
    # Consider:
    #  - interactive shell terminal: can detect terminal size
    #  - interactive non-shell terminal (ipnb/ipqtconsole): cannot detect
    #    terminal size
    #  - non-interactive script: should disregard terminal size
    # In addition, width/height have default values, but setting them to
    # 'None' signals that auto-detection should be used, but only in an
    # interactive shell terminal.
if com.in_interactive_session():
if com.in_ipython_frontend():
# sane defaults for interactive non-shell terminal
# match default for width,height in config_init
from pandas.core.config import get_default_val
terminal_width = get_default_val('display.width')
terminal_height = get_default_val('display.height')
else:
# pure terminal
terminal_width, terminal_height = get_terminal_size()
else:
terminal_width, terminal_height = None, None
    # Note: if the user sets width/height to None (auto-detection) and we're
    # in a non-interactive script, this will return (None, None); the caller
    # needs to handle that.
return (display_width or terminal_width, display_height or terminal_height)
class EngFormatter(object):
"""
Formats float values according to engineering format.
Based on matplotlib.ticker.EngFormatter
"""
# The SI engineering prefixes
ENG_PREFIXES = {
-24: "y",
-21: "z",
-18: "a",
-15: "f",
-12: "p",
-9: "n",
-6: "u",
-3: "m",
0: "",
3: "k",
6: "M",
9: "G",
12: "T",
15: "P",
18: "E",
21: "Z",
24: "Y"
}
def __init__(self, accuracy=None, use_eng_prefix=False):
self.accuracy = accuracy
self.use_eng_prefix = use_eng_prefix
def __call__(self, num):
""" Formats a number in engineering notation, appending a letter
representing the power of 1000 of the original number. Some examples:
>>> format_eng(0) # for self.accuracy = 0
' 0'
>>> format_eng(1000000) # for self.accuracy = 1,
# self.use_eng_prefix = True
' 1.0M'
>>> format_eng("-1e-6") # for self.accuracy = 2
# self.use_eng_prefix = False
'-1.00E-06'
@param num: the value to represent
@type num: either a numeric value or a string that can be converted to
a numeric value (as per decimal.Decimal constructor)
@return: engineering formatted string
"""
import decimal
import math
dnum = decimal.Decimal(str(num))
sign = 1
if dnum < 0: # pragma: no cover
sign = -1
dnum = -dnum
if dnum != 0:
pow10 = decimal.Decimal(int(math.floor(dnum.log10() / 3) * 3))
else:
pow10 = decimal.Decimal(0)
pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
int_pow10 = int(pow10)
if self.use_eng_prefix:
prefix = self.ENG_PREFIXES[int_pow10]
else:
if int_pow10 < 0:
prefix = 'E-%02d' % (-int_pow10)
else:
prefix = 'E+%02d' % int_pow10
mant = sign * dnum / (10 ** pow10)
if self.accuracy is None: # pragma: no cover
format_str = u("% g%s")
else:
format_str = (u("%% .%if%%s") % self.accuracy)
formatted = format_str % (mant, prefix)
return formatted # .strip()
def set_eng_float_format(accuracy=3, use_eng_prefix=False):
"""
Alter default behavior on how float is formatted in DataFrame.
Format float in engineering format. By accuracy, we mean the number of
decimal digits after the floating point.
See also EngFormatter.
"""
set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix))
set_option("display.column_space", max(12, accuracy + 9))
def _put_lines(buf, lines):
if any(isinstance(x, compat.text_type) for x in lines):
lines = [compat.text_type(x) for x in lines]
buf.write('\n'.join(lines))
def _binify(cols, line_width):
adjoin_width = 1
bins = []
curr_width = 0
i_last_column = len(cols) - 1
for i, w in enumerate(cols):
w_adjoined = w + adjoin_width
curr_width += w_adjoined
if i_last_column == i:
wrap = curr_width + 1 > line_width and i > 0
else:
wrap = curr_width + 2 > line_width and i > 0
if wrap:
bins.append(i)
curr_width = w_adjoined
bins.append(len(cols))
return bins
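# Illustrative example: _binify returns the column indices at which a new line
# starts plus the total column count, e.g.
#   _binify([10, 10, 10, 10], line_width=25)  ->  [2, 4]
# meaning columns 0-1 fit on the first line and columns 2-3 go on the next.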
if __name__ == '__main__':
arr = np.array([746.03, 0.00, 5620.00, 1592.36])
# arr = np.array([11111111.1, 1.55])
# arr = [314200.0034, 1.4125678]
arr = np.array([327763.3119, 345040.9076, 364460.9915, 398226.8688,
383800.5172, 433442.9262, 539415.0568, 568590.4108,
599502.4276, 620921.8593, 620898.5294, 552427.1093,
555221.2193, 519639.7059, 388175.7, 379199.5854,
614898.25, 504833.3333, 560600., 941214.2857,
1134250., 1219550., 855736.85, 1042615.4286,
722621.3043, 698167.1818, 803750.])
fmt = FloatArrayFormatter(arr, digits=7)
print(fmt.get_result())
| gpl-2.0 |
nmayorov/scikit-learn | sklearn/utils/tests/test_multiclass.py | 34 | 13405 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
from sklearn.utils.multiclass import check_classification_targets
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
# sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
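# For orientation, a few representative type_of_target mappings exercised by
# the tests below (the assertions in test_type_of_target are authoritative):
#   type_of_target([0, 1, 1, 0])                 -> 'binary'
#   type_of_target([0.2, 0.5])                   -> 'continuous'
#   type_of_target(np.array([[0, 1], [1, 0]]))   -> 'multilabel-indicator'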
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
    # Borderline case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
    # Smoke test for all supported formats
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
    # We don't support those formats at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_check_classification_targets():
for y_type in EXAMPLES.keys():
if y_type in ["unknown", "continuous", 'continuous-multioutput']:
for example in EXAMPLES[y_type]:
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg,
check_classification_targets, example)
else:
for example in EXAMPLES[y_type]:
check_classification_targets(example)
# @ignore_warnings
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = 'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
     class_prior_sp) = class_distribution(y_sp, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
| bsd-3-clause |
kylerbrown/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 247 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner circle
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
rc/sfepy | sfepy/postprocess/plot_facets.py | 4 | 4294 | """
Functions to visualize the geometry elements and numbering and orientation of
their facets (edges and faces).
The standard geometry elements can be plotted by running::
$ python sfepy/postprocess/plot_facets.py
"""
from __future__ import absolute_import
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.linalg import (get_perpendiculars, normalize_vectors,
make_axis_rotation_matrix)
from sfepy.postprocess.plot_dofs import _get_axes, plot_mesh, plot_global_dofs
import six
from six.moves import range
def plot_geometry(ax, gel):
"""
Plot a geometry element as a wireframe.
"""
ax = plot_mesh(ax, gel.coors, [gel.conn], gel.edges)
ax = plot_global_dofs(ax, gel.coors, [gel.conn])
return ax
def plot_edges(ax, gel, length):
"""
Plot edges of a geometry element as numbered arrows.
"""
dim = gel.dim
ax = _get_axes(ax, dim)
if gel.edges is None: return ax
l2 = 0.5 * length
for ii, edge in enumerate(gel.edges):
cc = gel.coors[edge]
centre = 0.5 * cc.sum(axis=0)
vdir = (cc - centre)
normalize_vectors(vdir)
cc = l2 * vdir + centre
draw_arrow(ax, cc, length=0.3*length, linewidth=3, color='b')
ax.text(*centre, s=ii,
color='b', fontsize=10, weight='light')
return ax
def plot_faces(ax, gel, radius, n_point):
"""
Plot faces of a 3D geometry element as numbered oriented arcs. An arc
centre corresponds to the first node of a face. It points from the first
edge towards the last edge of the face.
"""
dim = gel.dim
ax = _get_axes(ax, dim)
if dim < 3: return ax
for ii, face in enumerate(gel.faces):
cc = gel.coors[face]
t1 = cc[1, :] - cc[0, :]
t2 = cc[-1, :] - cc[0, :]
n = nm.cross(t1, t2)
nt1 = nm.linalg.norm(t1)
nt2 = nm.linalg.norm(t2)
angle = nm.arccos(nm.dot(t1, t2) / (nt1 * nt2))
da = angle / (n_point - 1)
mtx = make_axis_rotation_matrix(n, da)
rt = cc[0] + radius * t1 / nt1
coors = [rt]
for ip in range(n_point - 1):
rt = nm.dot(mtx.T, (rt - cc[0])) + cc[0]
coors.append(rt)
coors = nm.array(coors, dtype=nm.float64)
centre = coors.sum(axis=0) / coors.shape[0]
draw_arrow(ax, coors, length=0.3*radius, linewidth=3, color='r')
ax.text(*centre, s=ii,
color='r', fontsize=10, weight='light')
return ax
def draw_arrow(ax, coors, angle=20.0, length=0.3, **kwargs):
"""
Draw a line ended with an arrow head, in 2D or 3D.
"""
color = kwargs.get('color', 'b')
c0 = coors[-2]
c1 = coors[-1]
vd = c1 - c0
nvd = nm.linalg.norm(vd)
vd /= nvd
c0 = c1 - length * vd
ps = get_perpendiculars(vd)
rangle = nm.deg2rad(min(angle, 60.0))
plength = length * nm.arctan(rangle)
if coors.shape[1] == 2:
from matplotlib.patches import Polygon
cx, cy = coors[:, 0], coors[:, 1]
ax.plot(cx, cy, **kwargs)
p0 = c0 + plength * ps
p1 = c0 - plength * ps
pol = Polygon([p0, p1, c1], color=color)
ax.add_artist(pol)
else:
import mpl_toolkits.mplot3d as plt3
cx, cy, cz = coors[:, 0], coors[:, 1], coors[:, 2]
ax.plot(cx, cy, cz, **kwargs)
p00 = c0 + plength * ps[0]
p01 = c0 - plength * ps[0]
p10 = c0 + plength * ps[1]
p11 = c0 - plength * ps[1]
arr = plt3.art3d.Poly3DCollection([[p00, p01, c1],
[p10, p11, c1]], color=color)
ax.add_collection3d(arr)
if __name__ == '__main__':
from sfepy.discrete.fem.geometry_element import (GeometryElement,
geometry_data)
for key, gd in six.iteritems(geometry_data):
if key == '1_2' : continue
gel = GeometryElement(key)
ax = plot_geometry(None, gel)
ax = plot_edges(ax, gel, length=0.2)
ax = plot_faces(ax, gel, radius=0.3, n_point=5)
dd = 0.05
ax.set_xlim([-dd, 1.0 + dd])
ax.set_ylim([-dd, 1.0 + dd])
if gel.dim == 3:
ax.set_zlim([-dd, 1.0 + dd])
plt.show()
| bsd-3-clause |
shikhardb/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
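# Illustrative addition (not part of the original example): the fitted
# component means should land near the generating centres, roughly (0, 0) for
# the stretched blob and (20, 20) for the shifted one.
print("Fitted component means:")
print(clf.means_)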
plt.show()
| bsd-3-clause |
trungnt13/scikit-learn | sklearn/metrics/pairwise.py | 104 | 42995 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
    paired_distances : distances between pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
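# Note added for illustration (not part of scikit-learn): the expansion
# ||x - y||^2 = ||x||^2 - 2 <x, y> + ||y||^2 used above can produce tiny
# negative values through floating point cancellation, which is why
# euclidean_distances clips with np.maximum(distances, 0) before taking the
# square root.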
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
    The cosine distance is equivalent to half the squared
    euclidean distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
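# Worked derivation of the note above (comment added for illustration, not
# part of scikit-learn): for unit-norm rows x and y,
# ||x - y||^2 = ||x||^2 - 2 <x, y> + ||y||^2 = 2 - 2 <x, y>, hence
# 0.5 * ||x - y||^2 = 1 - <x, y> = 1 - cos(x, y), i.e. the cosine distance.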
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features
    coef0 : int, default 1
    degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
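# Special case worth noting (comment added for illustration, not part of
# scikit-learn): with degree=1, gamma=1 and coef0=0 the polynomial kernel
# reduces to the plain dot product, so
# polynomial_kernel(X, Y, degree=1, gamma=1, coef0=0) == linear_kernel(X, Y).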
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features
    coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
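# Relation between the two chi-squared kernels (comment added for
# illustration, not part of scikit-learn): the value returned here equals
# np.exp(gamma * additive_chi2_kernel(X, Y)); the in-place multiply and exp
# above just avoid allocating an extra temporary array.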
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
    filter_params : boolean, default False
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
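if __name__ == '__main__':
    # Minimal self-check, added for illustration only (not part of
    # scikit-learn): the euclidean code path should agree with scipy's cdist
    # on a small dense example, and a 'precomputed' metric returns its input
    # unchanged.
    from scipy.spatial.distance import cdist
    rng = np.random.RandomState(0)
    A = rng.rand(5, 3)
    B = rng.rand(4, 3)
    assert np.allclose(pairwise_distances(A, B, metric='euclidean'),
                       cdist(A, B, metric='euclidean'))
    D = pairwise_distances(A, metric='euclidean')
    assert pairwise_distances(D, metric='precomputed') is D
    print("pairwise.py self-check passed")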
| bsd-3-clause |
iABC2XYZ/abc | MEBT/MEBT.py | 1 | 7828 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: Jiang Peiyong
email: [email protected]
"""
import numpy as np
from scipy import interpolate
import matplotlib.pyplot as plt
def MapQuad(K,L):
    # Build the transfer matrix of a quadrupole magnet: K is the focusing
    # strength and L is the length of the quadrupole.
K2=np.sqrt(np.abs(K*1.))
K2_L=K2*L
if K>0:
C=np.cos(K2_L)
S=np.sin(K2_L)/K2
Sp=-np.sin(K2_L)*K2
else:
C=np.cosh(K2_L)
S=np.sinh(K2_L)/K2
Sp=np.sinh(K2_L)*K2
M=np.array([[C,S],[Sp,C]])
return M
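# Quick sanity check added for illustration (not in the original script): a
# quadrupole transfer matrix is symplectic, so its determinant is 1 for both
# the focusing (K > 0) and defocusing (K < 0) branches. The K and L values
# below are arbitrary.
for _K in (2.0, -2.0):
    assert np.abs(np.linalg.det(MapQuad(_K, 0.1)) - 1.0) < 1e-9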
def MapDrift(L):
M=np.array([[1.,L],[0.,1.]])
return M
def RandSigma(betaMax):
betaT=np.random.random()*betaMax
alphaT=np.random.random()*np.sign(np.random.random()-0.5)
gammaT=(1.+alphaT**2)/betaT
sigmaT=np.array([[betaT,-alphaT],[-alphaT,gammaT]])
return sigmaT
def RandLatticeBPL(numQuad):
quadL=np.random.random([numQuad])
quadK=np.random.random([numQuad])-0.5
driftL=np.random.random([numQuad+1])
return quadL,quadK,driftL
def CalBTL(numSample,sigmaTx,sigmaTy,quadL,quadK,driftL):
lenBTL=np.sum(quadL)+np.sum(driftL)
nCell=len(quadK)
kStart=np.zeros([nCell])
kEnd=np.zeros([nCell])
for iCell in range(nCell):
if iCell==0:
kStart[0]=driftL[0]
kEnd[0]=kStart[0]+quadL[0]
continue
kStart[iCell]=kEnd[iCell-1]+driftL[iCell]
kEnd[iCell]=kStart[iCell]+quadL[iCell]
Z=np.linspace(0.,lenBTL,numSample)
K=np.zeros([numSample])
for iCell in range(nCell):
K[(Z>=kStart[iCell]) * (Z<=kEnd[iCell])]=quadK[iCell]
dL=lenBTL/(numSample-1.)
betaX=np.zeros([numSample])
betaY=np.zeros([numSample])
for iL in range(numSample):
if iL==0:
betaX[0]=sigmaTx[0,0]
betaY[0]=sigmaTy[0,0]
kLocal=K[iL]
if np.abs(kLocal)<1e-6:
Mx=MapDrift(dL)
My=Mx
else:
Mx=MapQuad(kLocal,dL)
My=MapQuad(-kLocal,dL)
sigmaTx=np.matmul(np.matmul(Mx,sigmaTx),Mx.T)
sigmaTy=np.matmul(np.matmul(My,sigmaTy),My.T)
betaX[iL]=sigmaTx[0,0]
betaY[iL]=sigmaTy[0,0]
return Z,betaX,betaY
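# Minimal usage sketch added for illustration (the numbers below are arbitrary
# assumptions, not from the original script): propagate random Twiss
# parameters through a single drift-quad-drift cell and check that the beta
# functions returned by CalBTL stay positive along the line.
_Z, _bx, _by = CalBTL(64, RandSigma(10.), RandSigma(10.),
                      np.array([0.2]), np.array([1.5]), np.array([0.3, 0.3]))
assert np.all(_bx > 0.) and np.all(_by > 0.)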
def RandItemSingle(numSample,numQuadHigh):
numQuad=np.random.randint(0,high=numQuadHigh)
flagEle=np.zeros([numSample])
flagEle[0:numQuad+1:2]=1 # D
flagEle[1:numQuad:2]=4 # Q
quadL,quadK,driftL=RandLatticeBPL(numQuad)
betaMax=100.
sigmaTx=RandSigma(betaMax)
sigmaTy=RandSigma(betaMax)
Z,betaX,betaY=CalBTL(numSample,sigmaTx,sigmaTy,quadL,quadK,driftL)
dataLattice=np.zeros([numQuadHigh,3])
dataBeam=np.zeros([numSample,4])
dataLattice[0:numQuad+1,0]=driftL
dataLattice[0:numQuad,1]=quadK
dataLattice[0:numQuad,2]=quadL
dataBeam[:,0]=Z
dataBeam[:,1]=betaX
dataBeam[:,2]=betaY
dataBeam[:,3]=flagEle
return dataLattice,dataBeam
def RandItemMulti(numItem,numSample,numQuadHigh):
dataLattice=np.zeros([numItem,numQuadHigh,3])
dataBeam=np.zeros([numItem,numSample,4])
for iItem in range(numItem):
dataLatticeSingle,dataBeamSingle=RandItemSingle(numSample,numQuadHigh)
dataLattice[iItem,:,:]=dataLatticeSingle
dataBeam[iItem,:,:]=dataBeamSingle
return dataLattice,dataBeam
def DealBeta(zGiven,betaXGiven,betaYGiven,numSample,numQuad):
interpX=interpolate.interp1d(zGiven,betaXGiven,kind='cubic')
interpY=interpolate.interp1d(zGiven,betaYGiven,kind='cubic')
Z=np.linspace(zGiven[0],zGiven[-1],numSample)
betaX=interpX(Z)
betaY=interpY(Z)
flagEle=np.zeros([numSample])
flagEle[0:numQuad+1:2]=1 # D
flagEle[1:numQuad:2]=4 # Q
dataBeam=np.zeros([numSample,4])
dataBeam[:,0]=Z
dataBeam[:,1]=betaX
dataBeam[:,2]=betaY
dataBeam[:,3]=flagEle
return dataBeam
zGiven=np.array([0,2,3,5,6,7,9,12,15,16,17])
betaXGiven=np.sin(zGiven+np.random.random(np.size(zGiven)))+3
betaYGiven=-np.sin(zGiven+np.random.random(np.size(zGiven)))+3
plt.figure('betaGiven')
plt.clf()
plt.hold
plt.plot(zGiven,betaXGiven)
plt.plot(zGiven,betaYGiven)
numSample=2**8
numQuad=5
dataBeam=DealBeta(zGiven,betaXGiven,betaYGiven,numSample,numQuad)
Z=dataBeam[:,0]
betaX=dataBeam[:,1]
betaY=dataBeam[:,2]
flagEle=dataBeam[:,3]
plt.figure('beta')
plt.clf()
plt.hold
plt.plot(Z,betaX)
plt.plot(Z,betaY)
plt.figure('flagEle')
plt.plot(flagEle)
def DealUnpack(dataBeam,dataLattice):
Z=dataBeam[0:2,0]
betaX=dataBeam[0:2,1]
betaY=dataBeam[0:2,2]
flagEle=dataBeam[:,3]
numQuad=np.sum(flagEle>0)
numQuadHigh=len(dataLattice[:,0])
driftL=dataLattice[0:numQuad+1,0]
quadK=dataLattice[0:numQuad,1]
quadL=dataLattice[0:numQuad,2]
numSample=len(flagEle)
dZ=Z[1]-Z[0]
dBetaX=betaX[1]-betaX[0]
dBetaY=betaY[1]-betaY[0]
alphaX=-dBetaX/dZ
alphaY=-dBetaY/dZ
gammaX=(1.+alphaX**2)/betaX[0]
gammaY=(1.+alphaY**2)/betaY[0]
sigmaTx=np.array([[betaX[0],-alphaX],[-alphaX,gammaX]])
sigmaTy=np.array([[betaY[0],-alphaY],[-alphaY,gammaY]])
return numSample,numQuadHigh,sigmaTx,sigmaTy,quadL,quadK,driftL
numItem=64
numSample=8
numQuadHigh=8
dataLattice,dataBeam=RandItemSingle(numSample,numQuadHigh)
numSample,numQuadHigh,sigmaTx,sigmaTy,quadL,quadK,driftL=DealUnpack(dataBeam,dataLattice)
print(numSample)
print(numSample)
print 'sigmaTx'
print sigmaTx
print 'sigmaTy'
print sigmaTy
print 'quadL'
print quadL
print 'quadK'
print quadK
print 'driftL'
print driftL
print 'dataLattice'
print dataLattice
def RoundItemSingle(numSample,numQuadHigh,sigmaTx,sigmaTy,quadL,quadK,driftL):
quadL=quadL*np.abs(1.+np.random.randn()/3.)
quadK=quadK*(1.+np.random.randn()/3.)
driftL=driftL*np.abs(1.+np.random.randn()/3.)
Z,betaX,betaY=CalBTL(numSample,sigmaTx,sigmaTy,quadL,quadK,driftL)
dataLattice=np.zeros([numQuadHigh,3])
dataBeam=np.zeros([numSample,4])
numQuad=len(quadK)
dataLattice[0:numQuad+1,0]=driftL
dataLattice[0:numQuad,1]=quadK
dataLattice[0:numQuad,2]=quadL
flagEle=np.zeros([numSample])
flagEle[0:numQuad+1:2]=1 # D
flagEle[1:numQuad:2]=4 # Q
dataBeam[:,0]=Z
dataBeam[:,1]=betaX
dataBeam[:,2]=betaY
dataBeam[:,3]=flagEle
return dataLattice,dataBeam
dataLattice,dataBeam= RoundItemSingle(numSample,numQuadHigh,sigmaTx,sigmaTy,quadL,quadK,driftL)
print 'dataLattice'
print dataLattice
def RoundItemMulti(numItem,numSample,numQuadHigh,sigmaTx,sigmaTy,quadL,quadK,driftL):
dataLattice=np.zeros([numItem,numQuadHigh,3])
dataBeam=np.zeros([numItem,numSample,4])
for iItem in range(numItem):
dataLatticeSingle,dataBeamSingle= RoundItemSingle(numSample,numQuadHigh,sigmaTx,sigmaTy,quadL,quadK,driftL)
dataLattice[iItem,:,:]=dataLatticeSingle
dataBeam[iItem,:,:]=dataBeamSingle
return dataLattice,dataBeam
dataLattice,dataBeam=RoundItemMulti(numItem,numSample,numQuadHigh,sigmaTx,sigmaTy,quadL,quadK,driftL)
print 'dataLattice'
print dataLattice
print 'dataBeam'
print dataBeam
def RoundItemMultiPack(numItem,dataBeamSingle,dataLatticeSingle):
numSample,numQuadHigh,sigmaTx,sigmaTy,quadL,quadK,driftL=DealUnpack(dataBeamSingle,dataLatticeSingle)
dataLattice,dataBeam=RoundItemMulti(numItem,numSample,numQuadHigh,sigmaTx,sigmaTy,quadL,quadK,driftL)
return dataLattice,dataBeam
dataLatticeSingle,dataBeamSingle=RandItemSingle(numSample,numQuadHigh)
dataLattice,dataBeam=RoundItemMultiPack(numItem,dataBeamSingle,dataLatticeSingle)
print '+'*180
print dataLattice
print '-'*180
print dataBeam
| gpl-3.0 |
iismd17/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
rbalda/neural_ocr | env/lib/python2.7/site-packages/matplotlib/tri/triinterpolate.py | 8 | 66410 | """
Interpolation inside triangular grids.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
from matplotlib.tri import Triangulation
from matplotlib.tri.trifinder import TriFinder
from matplotlib.tri.tritools import TriAnalyzer
import numpy as np
import warnings
__all__ = ('TriInterpolator', 'LinearTriInterpolator', 'CubicTriInterpolator')
class TriInterpolator(object):
"""
Abstract base class for classes used to perform interpolation on
triangular grids.
Derived classes implement the following methods:
- ``__call__(x, y)`` ,
where x, y are array_like point coordinates of the same shape, and
that returns a masked array of the same shape containing the
interpolated z-values.
- ``gradient(x, y)`` ,
where x, y are array_like point coordinates of the same
shape, and that returns a list of 2 masked arrays of the same shape
containing the 2 derivatives of the interpolator (derivatives of
interpolated z values with respect to x and y).
"""
def __init__(self, triangulation, z, trifinder=None):
if not isinstance(triangulation, Triangulation):
raise ValueError("Expected a Triangulation object")
self._triangulation = triangulation
self._z = np.asarray(z)
if self._z.shape != self._triangulation.x.shape:
raise ValueError("z array must have same length as triangulation x"
" and y arrays")
if trifinder is not None and not isinstance(trifinder, TriFinder):
raise ValueError("Expected a TriFinder object")
self._trifinder = trifinder or self._triangulation.get_trifinder()
# Default scaling factors : 1.0 (= no scaling)
# Scaling may be used for interpolations for which the order of
# magnitude of x, y has an impact on the interpolant definition.
# Please refer to :meth:`_interpolate_multikeys` for details.
self._unit_x = 1.0
self._unit_y = 1.0
# Default triangle renumbering: None (= no renumbering)
        # Renumbering may be used to avoid unnecessary computations
# if complex calculations are done inside the Interpolator.
# Please refer to :meth:`_interpolate_multikeys` for details.
self._tri_renum = None
# __call__ and gradient docstrings are shared by all subclasses
# (except, if needed, relevant additions).
# However these methods are only implemented in subclasses to avoid
# confusion in the documentation.
docstring__call__ = """
Returns a masked array containing interpolated values at the specified
x,y points.
Parameters
----------
x, y : array-like
x and y coordinates of the same shape and any number of
dimensions.
Returns
-------
z : np.ma.array
Masked array of the same shape as *x* and *y* ; values
corresponding to (*x*, *y*) points outside of the triangulation
are masked out.
"""
docstringgradient = """
Returns a list of 2 masked arrays containing interpolated derivatives
at the specified x,y points.
Parameters
----------
x, y : array-like
x and y coordinates of the same shape and any number of
dimensions.
Returns
-------
dzdx, dzdy : np.ma.array
2 masked arrays of the same shape as *x* and *y* ; values
corresponding to (x,y) points outside of the triangulation
are masked out.
The first returned array contains the values of
:math:`\\frac{\\partial z}{\\partial x}` and the second those of
:math:`\\frac{\\partial z}{\\partial y}`.
"""
def _interpolate_multikeys(self, x, y, tri_index=None,
return_keys=('z',)):
"""
Versatile (private) method defined for all TriInterpolators.
:meth:`_interpolate_multikeys` is a wrapper around method
:meth:`_interpolate_single_key` (to be defined in the child
subclasses).
        :meth:`_interpolate_single_key` actually performs the interpolation,
but only for 1-dimensional inputs and at valid locations (inside
unmasked triangles of the triangulation).
The purpose of :meth:`_interpolate_multikeys` is to implement the
following common tasks needed in all subclasses implementations:
- calculation of containing triangles
- dealing with more than one interpolation request at the same
location (e.g., if the 2 derivatives are requested, it is
unnecessary to compute the containing triangles twice)
- scaling according to self._unit_x, self._unit_y
- dealing with points outside of the grid (with fill value np.nan)
            - dealing with multi-dimensional *x*, *y* arrays: flattening for
              :meth:`_interpolate_single_key` calls and final reshaping.
(Note that np.vectorize could do most of those things very well for
you, but it does it by function evaluations over successive tuples of
the input arrays. Therefore, this tends to be more time consuming than
using optimized numpy functions - e.g., np.dot - which can be used
easily on the flattened inputs, in the child-subclass methods
:meth:`_interpolate_single_key`.)
It is guaranteed that the calls to :meth:`_interpolate_single_key`
will be done with flattened (1-d) array_like input parameters `x`, `y`
and with flattened, valid `tri_index` arrays (no -1 index allowed).
Parameters
----------
x, y : array_like
x and y coordinates indicating where interpolated values are
requested.
tri_index : integer array_like, optional
Array of the containing triangle indices, same shape as
*x* and *y*. Defaults to None. If None, these indices
will be computed by a TriFinder instance.
(Note: For point outside the grid, tri_index[ipt] shall be -1).
return_keys : tuple of keys from {'z', 'dzdx', 'dzdy'}
Defines the interpolation arrays to return, and in which order.
Returns
-------
ret : list of arrays
Each array-like contains the expected interpolated values in the
order defined by *return_keys* parameter.
"""
# Flattening and rescaling inputs arrays x, y
# (initial shape is stored for output)
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
sh_ret = x.shape
if (x.shape != y.shape):
raise ValueError("x and y shall have same shapes."
" Given: {0} and {1}".format(x.shape, y.shape))
x = np.ravel(x)
y = np.ravel(y)
x_scaled = x/self._unit_x
y_scaled = y/self._unit_y
size_ret = np.size(x_scaled)
# Computes & ravels the element indexes, extract the valid ones.
if tri_index is None:
tri_index = self._trifinder(x, y)
else:
if (tri_index.shape != sh_ret):
raise ValueError(
"tri_index array is provided and shall"
" have same shape as x and y. Given: "
"{0} and {1}".format(tri_index.shape, sh_ret))
tri_index = np.ravel(tri_index)
mask_in = (tri_index != -1)
if self._tri_renum is None:
valid_tri_index = tri_index[mask_in]
else:
valid_tri_index = self._tri_renum[tri_index[mask_in]]
valid_x = x_scaled[mask_in]
valid_y = y_scaled[mask_in]
ret = []
for return_key in return_keys:
# Find the return index associated with the key.
try:
return_index = {'z': 0, 'dzdx': 1, 'dzdy': 2}[return_key]
except KeyError:
raise ValueError("return_keys items shall take values in"
" {'z', 'dzdx', 'dzdy'}")
# Sets the scale factor for f & df components
scale = [1., 1./self._unit_x, 1./self._unit_y][return_index]
# Computes the interpolation
ret_loc = np.empty(size_ret, dtype=np.float64)
ret_loc[~mask_in] = np.nan
ret_loc[mask_in] = self._interpolate_single_key(
return_key, valid_tri_index, valid_x, valid_y) * scale
ret += [np.ma.masked_invalid(ret_loc.reshape(sh_ret), copy=False)]
return ret
def _interpolate_single_key(self, return_key, tri_index, x, y):
"""
Performs the interpolation at points belonging to the triangulation
        (inside an unmasked triangle).
Parameters
----------
        return_key : string key from {'z', 'dzdx', 'dzdy'}
Identifies the requested values (z or its derivatives)
tri_index : 1d integer array
Valid triangle index (-1 prohibited)
x, y : 1d arrays, same shape as `tri_index`
Valid locations where interpolation is requested.
Returns
-------
ret : 1-d array
Returned array of the same size as *tri_index*
"""
raise NotImplementedError("TriInterpolator subclasses" +
"should implement _interpolate_single_key!")
class LinearTriInterpolator(TriInterpolator):
"""
A LinearTriInterpolator performs linear interpolation on a triangular grid.
Each triangle is represented by a plane so that an interpolated value at
point (x,y) lies on the plane of the triangle containing (x,y).
Interpolated values are therefore continuous across the triangulation, but
their first derivatives are discontinuous at edges between triangles.
Parameters
----------
triangulation : :class:`~matplotlib.tri.Triangulation` object
The triangulation to interpolate over.
z : array_like of shape (npoints,)
Array of values, defined at grid points, to interpolate between.
trifinder : :class:`~matplotlib.tri.TriFinder` object, optional
If this is not specified, the Triangulation's default TriFinder will
be used by calling
:func:`matplotlib.tri.Triangulation.get_trifinder`.
Methods
-------
`__call__` (x, y) : Returns interpolated values at x,y points
`gradient` (x, y) : Returns interpolated derivatives at x,y points
"""
def __init__(self, triangulation, z, trifinder=None):
TriInterpolator.__init__(self, triangulation, z, trifinder)
# Store plane coefficients for fast interpolation calculations.
self._plane_coefficients = \
self._triangulation.calculate_plane_coefficients(self._z)
def __call__(self, x, y):
return self._interpolate_multikeys(x, y, tri_index=None,
return_keys=('z',))[0]
__call__.__doc__ = TriInterpolator.docstring__call__
def gradient(self, x, y):
return self._interpolate_multikeys(x, y, tri_index=None,
return_keys=('dzdx', 'dzdy'))
gradient.__doc__ = TriInterpolator.docstringgradient
def _interpolate_single_key(self, return_key, tri_index, x, y):
if return_key == 'z':
return (self._plane_coefficients[tri_index, 0]*x +
self._plane_coefficients[tri_index, 1]*y +
self._plane_coefficients[tri_index, 2])
elif return_key == 'dzdx':
return self._plane_coefficients[tri_index, 0]
elif return_key == 'dzdy':
return self._plane_coefficients[tri_index, 1]
else:
raise ValueError("Invalid return_key: " + return_key)
class CubicTriInterpolator(TriInterpolator):
"""
A CubicTriInterpolator performs cubic interpolation on triangular grids.
In one-dimension - on a segment - a cubic interpolating function is
defined by the values of the function and its derivative at both ends.
This is almost the same in 2-d inside a triangle, except that the values
of the function and its 2 derivatives have to be defined at each triangle
node.
The CubicTriInterpolator takes the value of the function at each node -
provided by the user - and internally computes the value of the
derivatives, resulting in a smooth interpolation.
(As a special feature, the user can also impose the value of the
derivatives at each node, but this is not supposed to be the common
usage.)
Parameters
----------
triangulation : :class:`~matplotlib.tri.Triangulation` object
The triangulation to interpolate over.
z : array_like of shape (npoints,)
Array of values, defined at grid points, to interpolate between.
kind : {'min_E', 'geom', 'user'}, optional
Choice of the smoothing algorithm, in order to compute
the interpolant derivatives (defaults to 'min_E'):
        - if 'min_E': (default) The derivatives at each node are computed
          to minimize a bending energy.
        - if 'geom': The derivatives at each node are computed as a
          weighted average of relevant triangle normals. To be used for
          speed optimization (large grids).
- if 'user': The user provides the argument `dz`, no computation
is hence needed.
trifinder : :class:`~matplotlib.tri.TriFinder` object, optional
If not specified, the Triangulation's default TriFinder will
be used by calling
:func:`matplotlib.tri.Triangulation.get_trifinder`.
dz : tuple of array_likes (dzdx, dzdy), optional
Used only if *kind* ='user'. In this case *dz* must be provided as
(dzdx, dzdy) where dzdx, dzdy are arrays of the same shape as *z* and
are the interpolant first derivatives at the *triangulation* points.
Methods
-------
`__call__` (x, y) : Returns interpolated values at x,y points
`gradient` (x, y) : Returns interpolated derivatives at x,y points
Notes
-----
This note is a bit technical and details the way a
:class:`~matplotlib.tri.CubicTriInterpolator` computes a cubic
interpolation.
The interpolation is based on a Clough-Tocher subdivision scheme of
the *triangulation* mesh (to make it clearer, each triangle of the
grid will be divided in 3 child-triangles, and on each child triangle
the interpolated function is a cubic polynomial of the 2 coordinates).
This technique originates from FEM (Finite Element Method) analysis;
the element used is a reduced Hsieh-Clough-Tocher (HCT)
element. Its shape functions are described in [1]_.
The assembled function is guaranteed to be C1-smooth, i.e. it is
continuous and its first derivatives are also continuous (this
is easy to show inside the triangles but is also true when crossing the
edges).
In the default case (*kind* ='min_E'), the interpolant minimizes a
curvature energy on the functional space generated by the HCT element
shape functions - with imposed values but arbitrary derivatives at each
node. The minimized functional is the integral of the so-called total
curvature (implementation based on an algorithm from [2]_ - PCG sparse
solver):
.. math::
E(z) = \\ \\frac{1}{2} \\int_{\\Omega} \\left(
\\left( \\frac{\\partial^2{z}}{\\partial{x}^2} \\right)^2 +
\\left( \\frac{\\partial^2{z}}{\\partial{y}^2} \\right)^2 +
2\\left( \\frac{\\partial^2{z}}{\\partial{y}\\partial{x}}
\\right)^2 \\right) dx\\,dy
If the case *kind* ='geom' is chosen by the user, a simple geometric
approximation is used (weighted average of the triangle normal
vectors), which could improve speed on very large grids.
References
----------
.. [1] Michel Bernadou, Kamal Hassan, "Basis functions for general
Hsieh-Clough-Tocher triangles, complete or reduced.",
International Journal for Numerical Methods in Engineering,
17(5):784 - 789. 2.01.
.. [2] C.T. Kelley, "Iterative Methods for Optimization".
"""
def __init__(self, triangulation, z, kind='min_E', trifinder=None,
dz=None):
TriInterpolator.__init__(self, triangulation, z, trifinder)
# Loads the underlying c++ _triangulation.
# (During loading, reordering of triangulation._triangles may occur so
# that all final triangles are now anti-clockwise)
self._triangulation.get_cpp_triangulation()
# To build the stiffness matrix and avoid zero-energy spurious modes
# we will only store internally the valid (unmasked) triangles and
# the necessary (used) points coordinates.
# 2 renumbering tables need to be computed and stored:
# - a triangle renum table in order to translate the result from a
# TriFinder instance into the internal stored triangle number.
# - a node renum table to overwrite the self._z values into the new
# (used) node numbering.
tri_analyzer = TriAnalyzer(self._triangulation)
(compressed_triangles, compressed_x, compressed_y, tri_renum,
node_renum) = tri_analyzer._get_compressed_triangulation(True, True)
self._triangles = compressed_triangles
self._tri_renum = tri_renum
# Taking into account the node renumbering in self._z:
node_mask = (node_renum == -1)
self._z[node_renum[~node_mask]] = self._z
self._z = self._z[~node_mask]
# Computing scale factors
self._unit_x = np.max(compressed_x) - np.min(compressed_x)
self._unit_y = np.max(compressed_y) - np.min(compressed_y)
self._pts = np.vstack((compressed_x/float(self._unit_x),
compressed_y/float(self._unit_y))).T
# Computing triangle points
self._tris_pts = self._pts[self._triangles]
# Computing eccentricities
self._eccs = self._compute_tri_eccentricities(self._tris_pts)
# Computing dof estimations for HCT triangle shape function
self._dof = self._compute_dof(kind, dz=dz)
# Loading HCT element
self._ReferenceElement = _ReducedHCT_Element()
def __call__(self, x, y):
return self._interpolate_multikeys(x, y, tri_index=None,
return_keys=('z',))[0]
__call__.__doc__ = TriInterpolator.docstring__call__
def gradient(self, x, y):
return self._interpolate_multikeys(x, y, tri_index=None,
return_keys=('dzdx', 'dzdy'))
gradient.__doc__ = TriInterpolator.docstringgradient + """
Examples
--------
An example of effective application is shown below (plot of the
        direction of the vector field derived from a known potential field):
.. plot:: mpl_examples/pylab_examples/trigradient_demo.py
"""
def _interpolate_single_key(self, return_key, tri_index, x, y):
tris_pts = self._tris_pts[tri_index]
alpha = self._get_alpha_vec(x, y, tris_pts)
ecc = self._eccs[tri_index]
dof = np.expand_dims(self._dof[tri_index], axis=1)
if return_key == 'z':
return self._ReferenceElement.get_function_values(
alpha, ecc, dof)
elif return_key in ['dzdx', 'dzdy']:
J = self._get_jacobian(tris_pts)
dzdx = self._ReferenceElement.get_function_derivatives(
alpha, J, ecc, dof)
if return_key == 'dzdx':
return dzdx[:, 0, 0]
else:
return dzdx[:, 1, 0]
else:
raise ValueError("Invalid return_key: " + return_key)
def _compute_dof(self, kind, dz=None):
"""
Computes and returns nodal dofs according to kind
Parameters
----------
kind: {'min_E', 'geom', 'user'}
Choice of the _DOF_estimator subclass to perform the gradient
estimation.
dz: tuple of array_likes (dzdx, dzdy), optional
            Used only if *kind* ='user'; in this case passed to the
:class:`_DOF_estimator_user`.
Returns
-------
dof : array_like, shape (npts,2)
Estimation of the gradient at triangulation nodes (stored as
            degrees of freedom of reduced-HCT triangle elements).
"""
if kind == 'user':
if dz is None:
raise ValueError("For a CubicTriInterpolator with "
"*kind*='user', a valid *dz* "
"argument is expected.")
TE = _DOF_estimator_user(self, dz=dz)
elif kind == 'geom':
TE = _DOF_estimator_geom(self)
elif kind == 'min_E':
TE = _DOF_estimator_min_E(self)
else:
raise ValueError("CubicTriInterpolator *kind* proposed: {0} ; "
"should be one of: "
"'user', 'geom', 'min_E'".format(kind))
return TE.compute_dof_from_df()
@staticmethod
def _get_alpha_vec(x, y, tris_pts):
"""
Fast (vectorized) function to compute barycentric coordinates alpha.
Parameters
----------
x, y : array-like of dim 1 (shape (nx,))
            Coordinates of the points whose barycentric coordinates are
            requested.
tris_pts : array like of dim 3 (shape: (nx,3,2))
Coordinates of the containing triangles apexes.
Returns
-------
alpha : array of dim 2 (shape (nx,3))
Barycentric coordinates of the points inside the containing
triangles.
"""
ndim = tris_pts.ndim-2
a = tris_pts[:, 1, :] - tris_pts[:, 0, :]
b = tris_pts[:, 2, :] - tris_pts[:, 0, :]
abT = np.concatenate([np.expand_dims(a, ndim+1),
np.expand_dims(b, ndim+1)], ndim+1)
ab = _transpose_vectorized(abT)
x = np.expand_dims(x, ndim)
y = np.expand_dims(y, ndim)
OM = np.concatenate([x, y], ndim) - tris_pts[:, 0, :]
metric = _prod_vectorized(ab, abT)
        # Here we try to deal with the collinear cases.
# metric_inv is in this case set to the Moore-Penrose pseudo-inverse
# meaning that we will still return a set of valid barycentric
# coordinates.
metric_inv = _pseudo_inv22sym_vectorized(metric)
Covar = _prod_vectorized(ab, _transpose_vectorized(
np.expand_dims(OM, ndim)))
ksi = _prod_vectorized(metric_inv, Covar)
alpha = _to_matrix_vectorized([
[1-ksi[:, 0, 0]-ksi[:, 1, 0]], [ksi[:, 0, 0]], [ksi[:, 1, 0]]])
return alpha
@staticmethod
def _get_jacobian(tris_pts):
"""
Fast (vectorized) function to compute triangle jacobian matrix.
Parameters
----------
tris_pts : array like of dim 3 (shape: (nx,3,2))
Coordinates of the containing triangles apexes.
Returns
-------
J : array of dim 3 (shape (nx,2,2))
            Jacobian matrices at the first apex (apex 0) of the containing
            triangles.
J[itri,:,:] is the jacobian matrix at apex 0 of the triangle
itri, so that the following (matrix) relationship holds:
[dz/dksi] = [J] x [dz/dx]
with x: global coordinates
ksi: element parametric coordinates in triangle first apex
local basis.
"""
a = np.array(tris_pts[:, 1, :] - tris_pts[:, 0, :])
b = np.array(tris_pts[:, 2, :] - tris_pts[:, 0, :])
J = _to_matrix_vectorized([[a[:, 0], a[:, 1]],
[b[:, 0], b[:, 1]]])
return J
@staticmethod
def _compute_tri_eccentricities(tris_pts):
"""
Computes triangle eccentricities
Parameters
----------
tris_pts : array like of dim 3 (shape: (nx,3,2))
Coordinates of the triangles apexes.
Returns
-------
ecc : array like of dim 2 (shape: (nx,3))
The so-called eccentricity parameters [1] needed for
HCT triangular element.
"""
a = np.expand_dims(tris_pts[:, 2, :]-tris_pts[:, 1, :], axis=2)
b = np.expand_dims(tris_pts[:, 0, :]-tris_pts[:, 2, :], axis=2)
c = np.expand_dims(tris_pts[:, 1, :]-tris_pts[:, 0, :], axis=2)
# Do not use np.squeeze, this is dangerous if only one triangle
# in the triangulation...
dot_a = _prod_vectorized(_transpose_vectorized(a), a)[:, 0, 0]
dot_b = _prod_vectorized(_transpose_vectorized(b), b)[:, 0, 0]
dot_c = _prod_vectorized(_transpose_vectorized(c), c)[:, 0, 0]
# Note that this line will raise a warning for dot_a, dot_b or dot_c
# zeros, but we choose not to support triangles with duplicate points.
return _to_matrix_vectorized([[(dot_c-dot_b) / dot_a],
[(dot_a-dot_c) / dot_b],
[(dot_b-dot_a) / dot_c]])
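# Hedged usage sketch (illustrative only, not from the original module): the
# same driving pattern for CubicTriInterpolator, with the smoothing *kind*
# made explicit; the data values below are made up.
def _example_cubic_tri_interpolation():
    from matplotlib.tri import Triangulation
    x = np.asarray([0., 1., 1., 0., 0.5])
    y = np.asarray([0., 0., 1., 1., 0.5])
    z = np.sin(x) * np.cos(y)
    triang = Triangulation(x, y)
    interp_min_e = CubicTriInterpolator(triang, z)              # kind='min_E'
    interp_geom = CubicTriInterpolator(triang, z, kind='geom')  # faster
    # Interpolated values are C1-smooth; derivatives come from gradient().
    return interp_min_e(0.25, 0.25), interp_geom.gradient(0.25, 0.25)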
# FEM element used for interpolation and for solving minimisation
# problem (Reduced HCT element)
class _ReducedHCT_Element():
"""
Implementation of reduced HCT triangular element with explicit shape
functions.
Computes z, dz, d2z and the element stiffness matrix for bending energy:
E(f) = integral( (d2z/dx2 + d2z/dy2)**2 dA)
*** Reference for the shape functions: ***
[1] Basis functions for general Hsieh-Clough-Tocher _triangles, complete or
reduced.
Michel Bernadou, Kamal Hassan
International Journal for Numerical Methods in Engineering.
17(5):784 - 789. 2.01
*** Element description: ***
9 dofs: z and dz given at 3 apex
C1 (conform)
"""
# 1) Loads matrices to generate shape functions as a function of
    # triangle eccentricities - based on [1] p.11
M = np.array([
[ 0.00, 0.00, 0.00, 4.50, 4.50, 0.00, 0.00, 0.00, 0.00, 0.00],
[-0.25, 0.00, 0.00, 0.50, 1.25, 0.00, 0.00, 0.00, 0.00, 0.00],
[-0.25, 0.00, 0.00, 1.25, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.50, 1.00, 0.00, -1.50, 0.00, 3.00, 3.00, 0.00, 0.00, 3.00],
[ 0.00, 0.00, 0.00, -0.25, 0.25, 0.00, 1.00, 0.00, 0.00, 0.50],
[ 0.25, 0.00, 0.00, -0.50, -0.25, 1.00, 0.00, 0.00, 0.00, 1.00],
[ 0.50, 0.00, 1.00, 0.00, -1.50, 0.00, 0.00, 3.00, 3.00, 3.00],
[ 0.25, 0.00, 0.00, -0.25, -0.50, 0.00, 0.00, 0.00, 1.00, 1.00],
[ 0.00, 0.00, 0.00, 0.25, -0.25, 0.00, 0.00, 1.00, 0.00, 0.50]])
M0 = np.array([
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[-1.00, 0.00, 0.00, 1.50, 1.50, 0.00, 0.00, 0.00, 0.00, -3.00],
[-0.50, 0.00, 0.00, 0.75, 0.75, 0.00, 0.00, 0.00, 0.00, -1.50],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 1.00, 0.00, 0.00, -1.50, -1.50, 0.00, 0.00, 0.00, 0.00, 3.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.50, 0.00, 0.00, -0.75, -0.75, 0.00, 0.00, 0.00, 0.00, 1.50]])
M1 = np.array([
[-0.50, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[-0.25, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.50, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.25, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])
M2 = np.array([
[ 0.50, 0.00, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.25, 0.00, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[-0.50, 0.00, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[-0.25, 0.00, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])
# 2) Loads matrices to rotate components of gradient & Hessian
# vectors in the reference basis of triangle first apex (a0)
rotate_dV = np.array([[ 1., 0.], [ 0., 1.],
[ 0., 1.], [-1., -1.],
[-1., -1.], [ 1., 0.]])
rotate_d2V = np.array([[1., 0., 0.], [0., 1., 0.], [ 0., 0., 1.],
[0., 1., 0.], [1., 1., 1.], [ 0., -2., -1.],
[1., 1., 1.], [1., 0., 0.], [-2., 0., -1.]])
# 3) Loads Gauss points & weights on the 3 sub-_triangles for P2
    # exact integral - 3 points on each subtriangle.
    # NOTE: as the 2nd derivative is discontinuous, we really need those 9
# points!
n_gauss = 9
gauss_pts = np.array([[13./18., 4./18., 1./18.],
[ 4./18., 13./18., 1./18.],
[ 7./18., 7./18., 4./18.],
[ 1./18., 13./18., 4./18.],
[ 1./18., 4./18., 13./18.],
[ 4./18., 7./18., 7./18.],
[ 4./18., 1./18., 13./18.],
[13./18., 1./18., 4./18.],
[ 7./18., 4./18., 7./18.]], dtype=np.float64)
gauss_w = np.ones([9], dtype=np.float64) / 9.
# 4) Stiffness matrix for curvature energy
E = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 2.]])
# 5) Loads the matrix to compute DOF_rot from tri_J at apex 0
J0_to_J1 = np.array([[-1., 1.], [-1., 0.]])
J0_to_J2 = np.array([[ 0., -1.], [ 1., -1.]])
def get_function_values(self, alpha, ecc, dofs):
"""
Parameters
----------
alpha : is a (N x 3 x 1) array (array of column-matrices) of
barycentric coordinates,
ecc : is a (N x 3 x 1) array (array of column-matrices) of triangle
eccentricities,
dofs : is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
degrees of freedom.
Returns
-------
Returns the N-array of interpolated function values.
"""
subtri = np.argmin(alpha, axis=1)[:, 0]
ksi = _roll_vectorized(alpha, -subtri, axis=0)
E = _roll_vectorized(ecc, -subtri, axis=0)
x = ksi[:, 0, 0]
y = ksi[:, 1, 0]
z = ksi[:, 2, 0]
x_sq = x*x
y_sq = y*y
z_sq = z*z
V = _to_matrix_vectorized([
[x_sq*x], [y_sq*y], [z_sq*z], [x_sq*z], [x_sq*y], [y_sq*x],
[y_sq*z], [z_sq*y], [z_sq*x], [x*y*z]])
prod = _prod_vectorized(self.M, V)
prod += _scalar_vectorized(E[:, 0, 0],
_prod_vectorized(self.M0, V))
prod += _scalar_vectorized(E[:, 1, 0],
_prod_vectorized(self.M1, V))
prod += _scalar_vectorized(E[:, 2, 0],
_prod_vectorized(self.M2, V))
s = _roll_vectorized(prod, 3*subtri, axis=0)
return _prod_vectorized(dofs, s)[:, 0, 0]
def get_function_derivatives(self, alpha, J, ecc, dofs):
"""
Parameters
----------
*alpha* is a (N x 3 x 1) array (array of column-matrices of
barycentric coordinates)
*J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
triangle first apex)
*ecc* is a (N x 3 x 1) array (array of column-matrices of triangle
eccentricities)
*dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
degrees of freedom.
Returns
-------
Returns the values of interpolated function derivatives [dz/dx, dz/dy]
        in global coordinates at locations alpha, as column-matrices of
shape (N x 2 x 1).
"""
subtri = np.argmin(alpha, axis=1)[:, 0]
ksi = _roll_vectorized(alpha, -subtri, axis=0)
E = _roll_vectorized(ecc, -subtri, axis=0)
x = ksi[:, 0, 0]
y = ksi[:, 1, 0]
z = ksi[:, 2, 0]
x_sq = x*x
y_sq = y*y
z_sq = z*z
dV = _to_matrix_vectorized([
[ -3.*x_sq, -3.*x_sq],
[ 3.*y_sq, 0.],
[ 0., 3.*z_sq],
[ -2.*x*z, -2.*x*z+x_sq],
[-2.*x*y+x_sq, -2.*x*y],
[ 2.*x*y-y_sq, -y_sq],
[ 2.*y*z, y_sq],
[ z_sq, 2.*y*z],
[ -z_sq, 2.*x*z-z_sq],
[ x*z-y*z, x*y-y*z]])
# Puts back dV in first apex basis
dV = _prod_vectorized(dV, _extract_submatrices(
self.rotate_dV, subtri, block_size=2, axis=0))
prod = _prod_vectorized(self.M, dV)
prod += _scalar_vectorized(E[:, 0, 0],
_prod_vectorized(self.M0, dV))
prod += _scalar_vectorized(E[:, 1, 0],
_prod_vectorized(self.M1, dV))
prod += _scalar_vectorized(E[:, 2, 0],
_prod_vectorized(self.M2, dV))
dsdksi = _roll_vectorized(prod, 3*subtri, axis=0)
dfdksi = _prod_vectorized(dofs, dsdksi)
# In global coordinates:
        # Here we try to deal with the simplest collinear cases, returning a
        # null matrix.
J_inv = _safe_inv22_vectorized(J)
dfdx = _prod_vectorized(J_inv, _transpose_vectorized(dfdksi))
return dfdx
def get_function_hessians(self, alpha, J, ecc, dofs):
"""
Parameters
----------
*alpha* is a (N x 3 x 1) array (array of column-matrices) of
barycentric coordinates
*J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
triangle first apex)
*ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
eccentricities
*dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
degrees of freedom.
Returns
-------
Returns the values of interpolated function 2nd-derivatives
[d2z/dx2, d2z/dy2, d2z/dxdy] in global coordinates at locations alpha,
        as column-matrices of shape (N x 3 x 1).
"""
d2sdksi2 = self.get_d2Sidksij2(alpha, ecc)
d2fdksi2 = _prod_vectorized(dofs, d2sdksi2)
H_rot = self.get_Hrot_from_J(J)
d2fdx2 = _prod_vectorized(d2fdksi2, H_rot)
return _transpose_vectorized(d2fdx2)
def get_d2Sidksij2(self, alpha, ecc):
"""
Parameters
----------
*alpha* is a (N x 3 x 1) array (array of column-matrices) of
barycentric coordinates
*ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
eccentricities
Returns
-------
        Returns the arrays d2sdksi2 (N x 9 x 3) Hessian of shape functions
        expressed in covariant coordinates in first apex basis.
"""
subtri = np.argmin(alpha, axis=1)[:, 0]
ksi = _roll_vectorized(alpha, -subtri, axis=0)
E = _roll_vectorized(ecc, -subtri, axis=0)
x = ksi[:, 0, 0]
y = ksi[:, 1, 0]
z = ksi[:, 2, 0]
d2V = _to_matrix_vectorized([
[ 6.*x, 6.*x, 6.*x],
[ 6.*y, 0., 0.],
[ 0., 6.*z, 0.],
[ 2.*z, 2.*z-4.*x, 2.*z-2.*x],
[2.*y-4.*x, 2.*y, 2.*y-2.*x],
[2.*x-4.*y, 0., -2.*y],
[ 2.*z, 0., 2.*y],
[ 0., 2.*y, 2.*z],
[ 0., 2.*x-4.*z, -2.*z],
[ -2.*z, -2.*y, x-y-z]])
# Puts back d2V in first apex basis
d2V = _prod_vectorized(d2V, _extract_submatrices(
self.rotate_d2V, subtri, block_size=3, axis=0))
prod = _prod_vectorized(self.M, d2V)
prod += _scalar_vectorized(E[:, 0, 0],
_prod_vectorized(self.M0, d2V))
prod += _scalar_vectorized(E[:, 1, 0],
_prod_vectorized(self.M1, d2V))
prod += _scalar_vectorized(E[:, 2, 0],
_prod_vectorized(self.M2, d2V))
d2sdksi2 = _roll_vectorized(prod, 3*subtri, axis=0)
return d2sdksi2
def get_bending_matrices(self, J, ecc):
"""
Parameters
----------
*J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
triangle first apex)
*ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
eccentricities
Returns
-------
Returns the element K matrices for bending energy expressed in
GLOBAL nodal coordinates.
K_ij = integral [ (d2zi/dx2 + d2zi/dy2) * (d2zj/dx2 + d2zj/dy2) dA]
tri_J is needed to rotate dofs from local basis to global basis
"""
n = np.size(ecc, 0)
# 1) matrix to rotate dofs in global coordinates
J1 = _prod_vectorized(self.J0_to_J1, J)
J2 = _prod_vectorized(self.J0_to_J2, J)
DOF_rot = np.zeros([n, 9, 9], dtype=np.float64)
DOF_rot[:, 0, 0] = 1
DOF_rot[:, 3, 3] = 1
DOF_rot[:, 6, 6] = 1
DOF_rot[:, 1:3, 1:3] = J
DOF_rot[:, 4:6, 4:6] = J1
DOF_rot[:, 7:9, 7:9] = J2
# 2) matrix to rotate Hessian in global coordinates.
H_rot, area = self.get_Hrot_from_J(J, return_area=True)
# 3) Computes stiffness matrix
# Gauss quadrature.
K = np.zeros([n, 9, 9], dtype=np.float64)
weights = self.gauss_w
pts = self.gauss_pts
for igauss in range(self.n_gauss):
alpha = np.tile(pts[igauss, :], n).reshape(n, 3)
alpha = np.expand_dims(alpha, 3)
weight = weights[igauss]
d2Skdksi2 = self.get_d2Sidksij2(alpha, ecc)
d2Skdx2 = _prod_vectorized(d2Skdksi2, H_rot)
K += weight * _prod_vectorized(_prod_vectorized(d2Skdx2, self.E),
_transpose_vectorized(d2Skdx2))
# 4) With nodal (not elem) dofs
K = _prod_vectorized(_prod_vectorized(_transpose_vectorized(DOF_rot),
K), DOF_rot)
# 5) Need the area to compute total element energy
return _scalar_vectorized(area, K)
def get_Hrot_from_J(self, J, return_area=False):
"""
Parameters
----------
*J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
triangle first apex)
Returns
-------
Returns H_rot used to rotate Hessian from local basis of first apex,
to global coordinates.
if *return_area* is True, returns also the triangle area (0.5*det(J))
"""
        # Here we try to deal with the simplest collinear cases; a null
        # energy and area is imposed.
J_inv = _safe_inv22_vectorized(J)
Ji00 = J_inv[:, 0, 0]
Ji11 = J_inv[:, 1, 1]
Ji10 = J_inv[:, 1, 0]
Ji01 = J_inv[:, 0, 1]
H_rot = _to_matrix_vectorized([
[Ji00*Ji00, Ji10*Ji10, Ji00*Ji10],
[Ji01*Ji01, Ji11*Ji11, Ji01*Ji11],
[2*Ji00*Ji01, 2*Ji11*Ji10, Ji00*Ji11+Ji10*Ji01]])
if not return_area:
return H_rot
else:
area = 0.5 * (J[:, 0, 0]*J[:, 1, 1] - J[:, 0, 1]*J[:, 1, 0])
return H_rot, area
def get_Kff_and_Ff(self, J, ecc, triangles, Uc):
"""
Builds K and F for the following elliptic formulation:
minimization of curvature energy with value of function at node
imposed and derivatives 'free'.
        Builds the global Kff matrix in coo format.
Builds the full Ff vec Ff = - Kfc x Uc
Parameters
----------
*J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
triangle first apex)
*ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
eccentricities
*triangles* is a (N x 3) array of nodes indexes.
*Uc* is (N x 3) array of imposed displacements at nodes
Returns
-------
(Kff_rows, Kff_cols, Kff_vals) Kff matrix in coo format - Duplicate
(row, col) entries must be summed.
Ff: force vector - dim npts * 3
"""
ntri = np.size(ecc, 0)
vec_range = np.arange(ntri, dtype=np.int32)
c_indices = -np.ones(ntri, dtype=np.int32) # for unused dofs, -1
f_dof = [1, 2, 4, 5, 7, 8]
c_dof = [0, 3, 6]
# vals, rows and cols indices in global dof numbering
f_dof_indices = _to_matrix_vectorized([[
c_indices, triangles[:, 0]*2, triangles[:, 0]*2+1,
c_indices, triangles[:, 1]*2, triangles[:, 1]*2+1,
c_indices, triangles[:, 2]*2, triangles[:, 2]*2+1]])
expand_indices = np.ones([ntri, 9, 1], dtype=np.int32)
f_row_indices = _prod_vectorized(_transpose_vectorized(f_dof_indices),
_transpose_vectorized(expand_indices))
f_col_indices = _prod_vectorized(expand_indices, f_dof_indices)
K_elem = self.get_bending_matrices(J, ecc)
# Extracting sub-matrices
# Explanation & notations:
        # * Subscript f denotes 'free' degrees of freedom (i.e. dz/dx, dz/dy)
        # * Subscript c denotes 'condensed' (imposed) degrees of freedom
# (i.e. z at all nodes)
# * F = [Ff, Fc] is the force vector
# * U = [Uf, Uc] is the imposed dof vector
# [ Kff Kfc ]
# * K = [ ] is the laplacian stiffness matrix
        #        [ Kcf Kcc ]
# * As F = K x U one gets straightforwardly: Ff = - Kfc x Uc
# Computing Kff stiffness matrix in sparse coo format
Kff_vals = np.ravel(K_elem[np.ix_(vec_range, f_dof, f_dof)])
Kff_rows = np.ravel(f_row_indices[np.ix_(vec_range, f_dof, f_dof)])
Kff_cols = np.ravel(f_col_indices[np.ix_(vec_range, f_dof, f_dof)])
# Computing Ff force vector in sparse coo format
Kfc_elem = K_elem[np.ix_(vec_range, f_dof, c_dof)]
Uc_elem = np.expand_dims(Uc, axis=2)
Ff_elem = - _prod_vectorized(Kfc_elem, Uc_elem)[:, :, 0]
Ff_indices = f_dof_indices[np.ix_(vec_range, [0], f_dof)][:, 0, :]
# Extracting Ff force vector in dense format
# We have to sum duplicate indices - using bincount
Ff = np.bincount(np.ravel(Ff_indices), weights=np.ravel(Ff_elem))
return Kff_rows, Kff_cols, Kff_vals, Ff
# :class:_DOF_estimator, _DOF_estimator_user, _DOF_estimator_geom,
# _DOF_estimator_min_E
# Private classes used to compute the degree of freedom of each triangular
# element for the TriCubicInterpolator.
class _DOF_estimator():
"""
Abstract base class for classes used to perform estimation of a function
first derivatives, and deduce the dofs for a CubicTriInterpolator using a
reduced HCT element formulation.
    Derived classes implement compute_dz(self, **kwargs), returning
    np.vstack([dfx, dfy]).T where dfx, dfy are the estimates of the 2
    gradient components.
"""
def __init__(self, interpolator, **kwargs):
if not isinstance(interpolator, CubicTriInterpolator):
raise ValueError("Expected a CubicTriInterpolator object")
self._pts = interpolator._pts
self._tris_pts = interpolator._tris_pts
self.z = interpolator._z
self._triangles = interpolator._triangles
(self._unit_x, self._unit_y) = (interpolator._unit_x,
interpolator._unit_y)
self.dz = self.compute_dz(**kwargs)
self.compute_dof_from_df()
def compute_dz(self, **kwargs):
raise NotImplementedError
def compute_dof_from_df(self):
"""
Computes reduced-HCT elements degrees of freedom, knowing the
gradient.
"""
J = CubicTriInterpolator._get_jacobian(self._tris_pts)
tri_z = self.z[self._triangles]
tri_dz = self.dz[self._triangles]
tri_dof = self.get_dof_vec(tri_z, tri_dz, J)
return tri_dof
@staticmethod
def get_dof_vec(tri_z, tri_dz, J):
"""
Computes the dof vector of a triangle, knowing the value of f, df and
of the local Jacobian at each node.
*tri_z*: array of shape (3,) of f nodal values
*tri_dz*: array of shape (3,2) of df/dx, df/dy nodal values
*J*: Jacobian matrix in local basis of apex 0
Returns dof array of shape (9,) so that for each apex iapex:
dof[iapex*3+0] = f(Ai)
dof[iapex*3+1] = df(Ai).(AiAi+)
            dof[iapex*3+2] = df(Ai).(AiAi-)
"""
npt = tri_z.shape[0]
dof = np.zeros([npt, 9], dtype=np.float64)
J1 = _prod_vectorized(_ReducedHCT_Element.J0_to_J1, J)
J2 = _prod_vectorized(_ReducedHCT_Element.J0_to_J2, J)
col0 = _prod_vectorized(J, np.expand_dims(tri_dz[:, 0, :], axis=3))
col1 = _prod_vectorized(J1, np.expand_dims(tri_dz[:, 1, :], axis=3))
col2 = _prod_vectorized(J2, np.expand_dims(tri_dz[:, 2, :], axis=3))
dfdksi = _to_matrix_vectorized([
[col0[:, 0, 0], col1[:, 0, 0], col2[:, 0, 0]],
[col0[:, 1, 0], col1[:, 1, 0], col2[:, 1, 0]]])
dof[:, 0:7:3] = tri_z
dof[:, 1:8:3] = dfdksi[:, 0]
dof[:, 2:9:3] = dfdksi[:, 1]
return dof
class _DOF_estimator_user(_DOF_estimator):
""" dz is imposed by user / Accounts for scaling if any """
def compute_dz(self, dz):
(dzdx, dzdy) = dz
dzdx = dzdx * self._unit_x
dzdy = dzdy * self._unit_y
return np.vstack([dzdx, dzdy]).T
class _DOF_estimator_geom(_DOF_estimator):
""" Fast 'geometric' approximation, recommended for large arrays. """
def compute_dz(self):
"""
        The gradient df is computed as a weighted average over the _triangles
        sharing a common node. On each triangle itri, f is first assumed
        linear (= ~f), which allows computation of d~f[itri].
        The following approximation of the df nodal values is then proposed:
            df[ipt] = SUM ( w[itri] x d~f[itri] , for itri sharing apex ipt)
        The weighting coefficients w[itri] are proportional to the angle of
        the triangle itri at apex ipt.
"""
el_geom_w = self.compute_geom_weights()
el_geom_grad = self.compute_geom_grads()
# Sum of weights coeffs
w_node_sum = np.bincount(np.ravel(self._triangles),
weights=np.ravel(el_geom_w))
# Sum of weighted df = (dfx, dfy)
dfx_el_w = np.empty_like(el_geom_w)
dfy_el_w = np.empty_like(el_geom_w)
for iapex in range(3):
dfx_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 0]
dfy_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 1]
dfx_node_sum = np.bincount(np.ravel(self._triangles),
weights=np.ravel(dfx_el_w))
dfy_node_sum = np.bincount(np.ravel(self._triangles),
weights=np.ravel(dfy_el_w))
# Estimation of df
dfx_estim = dfx_node_sum/w_node_sum
dfy_estim = dfy_node_sum/w_node_sum
return np.vstack([dfx_estim, dfy_estim]).T
def compute_geom_weights(self):
"""
Builds the (nelems x 3) weights coeffs of _triangles angles,
renormalized so that np.sum(weights, axis=1) == np.ones(nelems)
"""
weights = np.zeros([np.size(self._triangles, 0), 3])
tris_pts = self._tris_pts
for ipt in range(3):
p0 = tris_pts[:, (ipt) % 3, :]
p1 = tris_pts[:, (ipt+1) % 3, :]
p2 = tris_pts[:, (ipt-1) % 3, :]
alpha1 = np.arctan2(p1[:, 1]-p0[:, 1], p1[:, 0]-p0[:, 0])
alpha2 = np.arctan2(p2[:, 1]-p0[:, 1], p2[:, 0]-p0[:, 0])
# In the below formula we could take modulo 2. but
# modulo 1. is safer regarding round-off errors (flat triangles).
angle = np.abs(np.mod((alpha2-alpha1) / np.pi, 1.))
            # Weight proportional to the angle, up to np.pi/2; null weight
            # for the degenerate cases 0. and np.pi (note that `angle` is
            # normalized by np.pi)
weights[:, ipt] = 0.5 - np.abs(angle-0.5)
return weights
def compute_geom_grads(self):
"""
Compute the (global) gradient component of f assumed linear (~f).
returns array df of shape (nelems,2)
        df[ielem].dM[ielem] = dz[ielem] i.e. df = dz x dM^-1 = dM.T^-1 x dz
"""
tris_pts = self._tris_pts
tris_f = self.z[self._triangles]
dM1 = tris_pts[:, 1, :] - tris_pts[:, 0, :]
dM2 = tris_pts[:, 2, :] - tris_pts[:, 0, :]
dM = np.dstack([dM1, dM2])
        # Here we try to deal with the simplest collinear cases: a null
        # gradient is assumed in this case.
dM_inv = _safe_inv22_vectorized(dM)
dZ1 = tris_f[:, 1] - tris_f[:, 0]
dZ2 = tris_f[:, 2] - tris_f[:, 0]
dZ = np.vstack([dZ1, dZ2]).T
df = np.empty_like(dZ)
# With np.einsum : could be ej,eji -> ej
df[:, 0] = dZ[:, 0]*dM_inv[:, 0, 0] + dZ[:, 1]*dM_inv[:, 1, 0]
df[:, 1] = dZ[:, 0]*dM_inv[:, 0, 1] + dZ[:, 1]*dM_inv[:, 1, 1]
return df
class _DOF_estimator_min_E(_DOF_estimator_geom):
"""
The 'smoothest' approximation, df is computed through global minimization
of the bending energy:
      E(f) = integral[ (d2z/dx2)**2 + (d2z/dy2)**2 + 2*(d2z/dxdy)**2 dA]
"""
def __init__(self, Interpolator):
self._eccs = Interpolator._eccs
_DOF_estimator_geom.__init__(self, Interpolator)
def compute_dz(self):
"""
Elliptic solver for bending energy minimization.
Uses a dedicated 'toy' sparse Jacobi PCG solver.
"""
# Initial guess for iterative PCG solver.
dz_init = _DOF_estimator_geom.compute_dz(self)
Uf0 = np.ravel(dz_init)
reference_element = _ReducedHCT_Element()
J = CubicTriInterpolator._get_jacobian(self._tris_pts)
eccs = self._eccs
triangles = self._triangles
Uc = self.z[self._triangles]
# Building stiffness matrix and force vector in coo format
Kff_rows, Kff_cols, Kff_vals, Ff = reference_element.get_Kff_and_Ff(
J, eccs, triangles, Uc)
# Building sparse matrix and solving minimization problem
        # We could use a scipy.sparse direct solver; however, to avoid this
        # external dependency, an implementation of a simple PCG solver with
        # a diagonal Jacobi preconditioner is used instead.
tol = 1.e-10
n_dof = Ff.shape[0]
Kff_coo = _Sparse_Matrix_coo(Kff_vals, Kff_rows, Kff_cols,
shape=(n_dof, n_dof))
Kff_coo.compress_csc()
Uf, err = _cg(A=Kff_coo, b=Ff, x0=Uf0, tol=tol)
# If the PCG did not converge, we return the best guess between Uf0
# and Uf.
err0 = np.linalg.norm(Kff_coo.dot(Uf0) - Ff)
if err0 < err:
# Maybe a good occasion to raise a warning here ?
warnings.warn("In TriCubicInterpolator initialization, PCG sparse"
" solver did not converge after 1000 iterations. "
"`geom` approximation is used instead of `min_E`")
Uf = Uf0
# Building dz from Uf
dz = np.empty([self._pts.shape[0], 2], dtype=np.float64)
dz[:, 0] = Uf[::2]
dz[:, 1] = Uf[1::2]
return dz
# The following private :class:_Sparse_Matrix_coo and :func:_cg provide
# a PCG sparse solver for (symmetric) elliptic problems.
class _Sparse_Matrix_coo(object):
def __init__(self, vals, rows, cols, shape):
"""
Creates a sparse matrix in coo format
*vals*: arrays of values of non-null entries of the matrix
*rows*: int arrays of rows of non-null entries of the matrix
*cols*: int arrays of cols of non-null entries of the matrix
*shape*: 2-tuple (n,m) of matrix shape
"""
self.n, self.m = shape
self.vals = np.asarray(vals, dtype=np.float64)
self.rows = np.asarray(rows, dtype=np.int32)
self.cols = np.asarray(cols, dtype=np.int32)
def dot(self, V):
"""
Dot product of self by a vector *V* in sparse-dense to dense format
*V* dense vector of shape (self.m,)
"""
assert V.shape == (self.m,)
        # For a more generic implementation we could use the bincount keyword
        # argument minlength=self.n; however:
        # - it is new in numpy 1.6
        # - it is unnecessary when each row has at least 1 entry in the
        #   global matrix, which is the case here.
return np.bincount(self.rows, weights=self.vals*V[self.cols])
def compress_csc(self):
"""
Compress rows, cols, vals / summing duplicates. Sort for csc format.
"""
_, unique, indices = np.unique(
self.rows + self.n*self.cols,
return_index=True, return_inverse=True)
self.rows = self.rows[unique]
self.cols = self.cols[unique]
self.vals = np.bincount(indices, weights=self.vals)
def compress_csr(self):
"""
Compress rows, cols, vals / summing duplicates. Sort for csr format.
"""
_, unique, indices = np.unique(
self.m*self.rows + self.cols,
return_index=True, return_inverse=True)
self.rows = self.rows[unique]
self.cols = self.cols[unique]
self.vals = np.bincount(indices, weights=self.vals)
def to_dense(self):
"""
Returns a dense matrix representing self.
Mainly for debugging purposes.
"""
ret = np.zeros([self.n, self.m], dtype=np.float64)
nvals = self.vals.size
for i in range(nvals):
ret[self.rows[i], self.cols[i]] += self.vals[i]
return ret
def __str__(self):
return self.to_dense().__str__()
@property
def diag(self):
"""
Returns the (dense) vector of the diagonal elements.
"""
in_diag = (self.rows == self.cols)
        diag = np.zeros(min(self.n, self.m), dtype=np.float64)  # default 0.
diag[self.rows[in_diag]] = self.vals[in_diag]
return diag
def _cg(A, b, x0=None, tol=1.e-10, maxiter=1000):
"""
Use Preconditioned Conjugate Gradient iteration to solve A x = b
    A simple Jacobi (diagonal) preconditioner is used.
Parameters
----------
A: _Sparse_Matrix_coo
*A* must have been compressed before by compress_csc or
compress_csr method.
b: array
Right hand side of the linear system.
Returns
----------
x: array.
The converged solution.
err: float
The absolute error np.linalg.norm(A.dot(x) - b)
Other parameters
----------
x0: array.
Starting guess for the solution.
tol: float.
Tolerance to achieve. The algorithm terminates when the relative
residual is below tol.
maxiter: integer.
Maximum number of iterations. Iteration will stop
after maxiter steps even if the specified tolerance has not
been achieved.
"""
n = b.size
assert A.n == n
assert A.m == n
b_norm = np.linalg.norm(b)
# Jacobi pre-conditioner
kvec = A.diag
# For diag elem < 1e-6 we keep 1e-6.
kvec = np.where(kvec > 1.e-6, kvec, 1.e-6)
# Initial guess
if x0 is None:
x = np.zeros(n)
else:
x = x0
r = b - A.dot(x)
w = r/kvec
p = np.zeros(n)
beta = 0.0
rho = np.dot(r, w)
k = 0
# Following C. T. Kelley
while (np.sqrt(abs(rho)) > tol*b_norm) and (k < maxiter):
p = w + beta*p
z = A.dot(p)
alpha = rho/np.dot(p, z)
r = r - alpha*z
w = r/kvec
rhoold = rho
rho = np.dot(r, w)
x = x + alpha*p
beta = rho/rhoold
#err = np.linalg.norm(A.dot(x) - b) # absolute accuracy - not used
k += 1
err = np.linalg.norm(A.dot(x) - b)
return x, err
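# Minimal sketch (illustrative, not exercised by the library): the toy sparse
# matrix and PCG solver above applied to a tiny symmetric positive-definite
# system; the 2x2 system below is an arbitrary example.
def _example_cg_solve():
    vals = np.array([4., 1., 1., 3.])
    rows = np.array([0, 0, 1, 1])
    cols = np.array([0, 1, 0, 1])
    A = _Sparse_Matrix_coo(vals, rows, cols, shape=(2, 2))
    A.compress_csc()                 # sum duplicates, sort entries
    b = np.array([1., 2.])
    x, err = _cg(A, b)               # x should be close to [1/11, 7/11]
    return x, err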
# The following private functions:
# :func:`_inv22_vectorized`
# :func:`_safe_inv22_vectorized`
# :func:`_pseudo_inv22sym_vectorized`
# :func:`_prod_vectorized`
# :func:`_scalar_vectorized`
# :func:`_transpose_vectorized`
# :func:`_roll_vectorized`
# :func:`_to_matrix_vectorized`
# :func:`_extract_submatrices`
# provide fast numpy implementation of some standard operations on arrays of
# matrices - stored as (:, n_rows, n_cols)-shaped np.arrays.
def _inv22_vectorized(M):
"""
Inversion of arrays of (2,2) matrices.
"""
assert (M.ndim == 3)
assert (M.shape[-2:] == (2, 2))
M_inv = np.empty_like(M)
delta_inv = np.reciprocal(M[:, 0, 0]*M[:, 1, 1] - M[:, 0, 1]*M[:, 1, 0])
M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv
M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv
M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv
M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv
return M_inv
# Development note: Dealing with pathologic 'flat' triangles in the
# CubicTriInterpolator code and impact on (2,2)-matrix inversion functions
# :func:`_safe_inv22_vectorized` and :func:`_pseudo_inv22sym_vectorized`.
#
# Goals:
# 1) The CubicTriInterpolator should be able to handle flat or almost flat
# triangles without raising an error,
# 2) These degenerated triangles should have no impact on the automatic dof
# calculation (associated with null weight for the _DOF_estimator_geom and
# with null energy for the _DOF_estimator_min_E),
# 3) Linear patch test should be passed exactly on degenerated meshes,
# 4) Interpolation (with :meth:`_interpolate_single_key` or
# :meth:`_interpolate_multi_key`) shall be correctly handled even *inside*
# the pathologic triangles, to interact correctly with a TriRefiner class.
#
# Difficulties:
# Flat triangles have rank-deficient *J* (so-called jacobian matrix) and
# *metric* (the metric tensor = J x J.T). Computation of the local
# tangent plane is also problematic.
#
# Implementation:
# Most of the time, when computing the inverse of a rank-deficient matrix it
# is safe to simply return the null matrix (which is the implementation in
# :func:`_safe_inv22_vectorized`). This is because of point 2), itself
# enforced by:
# - null area hence null energy in :class:`_DOF_estimator_min_E`
# - angles close or equal to 0 or np.pi hence null weight in
# :class:`_DOF_estimator_geom`.
# Note that the function angle -> weight is continuous and maximum for an
# angle np.pi/2 (refer to :meth:`compute_geom_weights`)
# The exception is the computation of barycentric coordinates, which is done
# by inversion of the *metric* matrix. In this case, we need to compute a set
# of valid coordinates (1 among numerous possibilities), to ensure point 4).
# We benefit here from the symmetry of metric = J x J.T, which makes it easier
# to compute a pseudo-inverse in :func:`_pseudo_inv22sym_vectorized`
def _safe_inv22_vectorized(M):
"""
Inversion of arrays of (2,2) matrices, returns 0 for rank-deficient
matrices.
*M* : array of (2,2) matrices to inverse, shape (n,2,2)
"""
assert M.ndim == 3
assert M.shape[-2:] == (2, 2)
M_inv = np.empty_like(M)
prod1 = M[:, 0, 0]*M[:, 1, 1]
delta = prod1 - M[:, 0, 1]*M[:, 1, 0]
# We set delta_inv to 0. in case of a rank deficient matrix ; a
# rank-deficient input matrix *M* will lead to a null matrix in output
rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))
if np.all(rank2):
# Normal 'optimized' flow.
delta_inv = 1./delta
else:
# 'Pathologic' flow.
delta_inv = np.zeros(M.shape[0])
delta_inv[rank2] = 1./delta[rank2]
M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv
M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv
M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv
M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv
return M_inv
def _pseudo_inv22sym_vectorized(M):
"""
Inversion of arrays of (2,2) SYMMETRIC matrices ; returns the
(Moore-Penrose) pseudo-inverse for rank-deficient matrices.
In case M is of rank 1, we have M = trace(M) x P where P is the orthogonal
projection on Im(M), and we return trace(M)^-1 x P == M / trace(M)**2
In case M is of rank 0, we return the null matrix.
*M* : array of (2,2) matrices to inverse, shape (n,2,2)
"""
assert M.ndim == 3
assert M.shape[-2:] == (2, 2)
M_inv = np.empty_like(M)
prod1 = M[:, 0, 0]*M[:, 1, 1]
delta = prod1 - M[:, 0, 1]*M[:, 1, 0]
rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))
if np.all(rank2):
# Normal 'optimized' flow.
M_inv[:, 0, 0] = M[:, 1, 1] / delta
M_inv[:, 0, 1] = -M[:, 0, 1] / delta
M_inv[:, 1, 0] = -M[:, 1, 0] / delta
M_inv[:, 1, 1] = M[:, 0, 0] / delta
else:
# 'Pathologic' flow.
# Here we have to deal with 2 sub-cases
# 1) First sub-case: matrices of rank 2:
delta = delta[rank2]
M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta
M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta
M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta
M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta
# 2) Second sub-case: rank-deficient matrices of rank 0 and 1:
rank01 = ~rank2
tr = M[rank01, 0, 0] + M[rank01, 1, 1]
tr_zeros = (np.abs(tr) < 1.e-8)
sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)
#sq_tr_inv = 1. / tr**2
M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv
M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv
M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv
M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv
return M_inv
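# Worked sketch (illustrative): for a rank-1 symmetric matrix the helper above
# returns M / trace(M)**2, which coincides with the Moore-Penrose
# pseudo-inverse; the 2x2 example matrix is arbitrary.
def _example_pseudo_inv_rank1():
    M = np.array([[[1., 1.], [1., 1.]]])     # rank 1, trace = 2
    M_inv = _pseudo_inv22sym_vectorized(M)   # expected M / 4
    return np.allclose(M_inv[0], np.linalg.pinv(M[0]))   # expected True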
def _prod_vectorized(M1, M2):
"""
Matrix product between arrays of matrices, or a matrix and an array of
matrices (*M1* and *M2*)
"""
sh1 = M1.shape
sh2 = M2.shape
assert len(sh1) >= 2
assert len(sh2) >= 2
assert sh1[-1] == sh2[-2]
ndim1 = len(sh1)
t1_index = list(xrange(ndim1-2)) + [ndim1-1, ndim1-2]
return np.sum(np.transpose(M1, t1_index)[..., np.newaxis] *
M2[..., np.newaxis, :], -3)
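# Quick sanity sketch (illustrative): _prod_vectorized compared against an
# explicit einsum over a small batch of random matrices.
def _example_prod_vectorized_check():
    rng = np.random.RandomState(0)
    A = rng.rand(5, 2, 3)
    B = rng.rand(5, 3, 4)
    return np.allclose(_prod_vectorized(A, B),
                       np.einsum('nij,njk->nik', A, B))   # expected True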
def _scalar_vectorized(scalar, M):
"""
Scalar product between scalars and matrices.
"""
return scalar[:, np.newaxis, np.newaxis]*M
def _transpose_vectorized(M):
"""
Transposition of an array of matrices *M*.
"""
ndim = M.ndim
assert ndim == 3
return np.transpose(M, [0, ndim-1, ndim-2])
def _roll_vectorized(M, roll_indices, axis):
"""
Rolls an array of matrices along an axis according to an array of indices
*roll_indices*
*axis* can be either 0 (rolls rows) or 1 (rolls columns).
"""
assert axis in [0, 1]
ndim = M.ndim
assert ndim == 3
ndim_roll = roll_indices.ndim
assert ndim_roll == 1
sh = M.shape
r, c = sh[-2:]
assert sh[0] == roll_indices.shape[0]
vec_indices = np.arange(sh[0], dtype=np.int32)
# Builds the rolled matrix
M_roll = np.empty_like(M)
if axis == 0:
for ir in range(r):
for ic in range(c):
M_roll[:, ir, ic] = M[vec_indices, (-roll_indices+ir) % r, ic]
elif axis == 1:
for ir in range(r):
for ic in range(c):
M_roll[:, ir, ic] = M[vec_indices, ir, (-roll_indices+ic) % c]
return M_roll
def _to_matrix_vectorized(M):
"""
    Builds an array of matrices from individual np.arrays of identical
    shapes.
    *M*: nrows-list of ncols-lists of arrays of shape sh.
    Returns M_res np.array of shape (sh, nrows, ncols) so that:
        M_res[...,i,j] = M[i][j]
"""
assert isinstance(M, (tuple, list))
assert all([isinstance(item, (tuple, list)) for item in M])
c_vec = np.asarray([len(item) for item in M])
assert np.all(c_vec-c_vec[0] == 0)
r = len(M)
c = c_vec[0]
M00 = np.asarray(M[0][0])
dt = M00.dtype
sh = [M00.shape[0], r, c]
M_ret = np.empty(sh, dtype=dt)
for irow in range(r):
for icol in range(c):
M_ret[:, irow, icol] = np.asarray(M[irow][icol])
return M_ret
def _extract_submatrices(M, block_indices, block_size, axis):
"""
    Extracts selected blocks of a matrix *M* depending on the parameters
*block_indices* and *block_size*.
Returns the array of extracted matrices *Mres* so that:
M_res[...,ir,:] = M[(block_indices*block_size+ir), :]
"""
assert block_indices.ndim == 1
assert axis in [0, 1]
r, c = M.shape
if axis == 0:
sh = [block_indices.shape[0], block_size, c]
elif axis == 1:
sh = [block_indices.shape[0], r, block_size]
dt = M.dtype
M_res = np.empty(sh, dtype=dt)
if axis == 0:
for ir in range(block_size):
M_res[:, ir, :] = M[(block_indices*block_size+ir), :]
elif axis == 1:
for ic in range(block_size):
M_res[:, :, ic] = M[:, (block_indices*block_size+ic)]
return M_res
| mit |
meduz/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 55 | 7386 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.org/basemap>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
   Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
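# Illustrative sketch (not part of the original example): the density-
# estimation idea in miniature -- fit a OneClassSVM on synthetic "presence"
# points and score a small grid; all numbers here are made up.
def _toy_one_class_density_example():
    rng = np.random.RandomState(0)
    presence = rng.normal(loc=0.0, scale=0.3, size=(100, 2))
    clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
    clf.fit(presence)
    grid = np.c_[np.linspace(-1, 1, 5), np.linspace(-1, 1, 5)]
    # Higher decision-function values correspond to higher estimated density.
    return clf.decision_function(grid)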
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
marcusmueller/gnuradio | gr-filter/examples/resampler.py | 7 | 4489 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr
from gnuradio import filter
from gnuradio import blocks
import sys
import numpy
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
from matplotlib import pyplot
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class mytb(gr.top_block):
def __init__(self, fs_in, fs_out, fc, N=10000):
gr.top_block.__init__(self)
rerate = float(fs_out) / float(fs_in)
print("Resampling from %f to %f by %f " %(fs_in, fs_out, rerate))
# Creating our own taps
taps = filter.firdes.low_pass_2(32, 32, 0.25, 0.1, 80)
self.src = analog.sig_source_c(fs_in, analog.GR_SIN_WAVE, fc, 1)
#self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, N)
# A resampler with our taps
self.resamp_0 = filter.pfb.arb_resampler_ccf(rerate, taps,
flt_size=32)
# A resampler that just needs a resampling rate.
# Filter is created for us and designed to cover
# entire bandwidth of the input signal.
# An optional atten=XX rate can be used here to
# specify the out-of-band rejection (default=80).
self.resamp_1 = filter.pfb.arb_resampler_ccf(rerate)
self.snk_in = blocks.vector_sink_c()
self.snk_0 = blocks.vector_sink_c()
self.snk_1 = blocks.vector_sink_c()
self.connect(self.src, self.head, self.snk_in)
self.connect(self.head, self.resamp_0, self.snk_0)
self.connect(self.head, self.resamp_1, self.snk_1)
def main():
fs_in = 8000
fs_out = 20000
fc = 1000
N = 10000
tb = mytb(fs_in, fs_out, fc, N)
tb.run()
# Plot PSD of signals
nfftsize = 2048
fig1 = pyplot.figure(1, figsize=(10,10), facecolor="w")
sp1 = fig1.add_subplot(2,1,1)
sp1.psd(tb.snk_in.data(), NFFT=nfftsize,
            noverlap=nfftsize // 4, Fs = fs_in)
sp1.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in / 1000.0)))
sp1.set_xlim([-fs_in / 2, fs_in / 2])
sp2 = fig1.add_subplot(2,1,2)
sp2.psd(tb.snk_0.data(), NFFT=nfftsize,
noverlap=nfftsize // 4, Fs=fs_out,
label="With our filter")
sp2.psd(tb.snk_1.data(), NFFT=nfftsize,
noverlap=nfftsize // 4, Fs=fs_out,
label="With auto-generated filter")
sp2.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out / 1000.0)))
sp2.set_xlim([-fs_out / 2, fs_out / 2])
sp2.legend()
# Plot signals in time
Ts_in = 1.0 / fs_in
Ts_out = 1.0 / fs_out
t_in = numpy.arange(0, len(tb.snk_in.data())*Ts_in, Ts_in)
t_out = numpy.arange(0, len(tb.snk_0.data())*Ts_out, Ts_out)
fig2 = pyplot.figure(2, figsize=(10,10), facecolor="w")
sp21 = fig2.add_subplot(2,1,1)
sp21.plot(t_in, tb.snk_in.data())
sp21.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in / 1000.0)))
sp21.set_xlim([t_in[100], t_in[200]])
sp22 = fig2.add_subplot(2,1,2)
sp22.plot(t_out, tb.snk_0.data(),
label="With our filter")
sp22.plot(t_out, tb.snk_1.data(),
label="With auto-generated filter")
sp22.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out / 1000.0)))
r = float(fs_out) / float(fs_in)
sp22.set_xlim([t_out[int(r * 100)], t_out[int(r * 200)]])
sp22.legend()
pyplot.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
jreback/pandas | pandas/tests/tseries/offsets/test_offsets_properties.py | 2 | 3474 | """
Behavioral based tests for offsets and date_range.
This file is adapted from https://github.com/pandas-dev/pandas/pull/18761 -
which was more ambitious but less idiomatic in its use of Hypothesis.
You may wish to consult the previous version for inspiration on further
tests, or when trying to pin down the bugs exposed by the tests below.
"""
import warnings
from hypothesis import assume, given, strategies as st
from hypothesis.extra.dateutil import timezones as dateutil_timezones
from hypothesis.extra.pytz import timezones as pytz_timezones
import pytest
import pytz
import pandas as pd
from pandas import Timestamp
from pandas.tseries.offsets import (
BMonthBegin,
BMonthEnd,
BQuarterBegin,
BQuarterEnd,
BYearBegin,
BYearEnd,
MonthBegin,
MonthEnd,
QuarterBegin,
QuarterEnd,
YearBegin,
YearEnd,
)
# ----------------------------------------------------------------
# Helpers for generating random data
with warnings.catch_warnings():
warnings.simplefilter("ignore")
min_dt = Timestamp(1900, 1, 1).to_pydatetime()
max_dt = Timestamp(2100, 1, 1).to_pydatetime()
gen_date_range = st.builds(
pd.date_range,
start=st.datetimes(
# TODO: Choose the min/max values more systematically
min_value=Timestamp(1900, 1, 1).to_pydatetime(),
max_value=Timestamp(2100, 1, 1).to_pydatetime(),
),
periods=st.integers(min_value=2, max_value=100),
freq=st.sampled_from("Y Q M D H T s ms us ns".split()),
tz=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
)
gen_random_datetime = st.datetimes(
min_value=min_dt,
max_value=max_dt,
timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
)
# The strategy for each type is registered in conftest.py, as they don't carry
# enough runtime information (e.g. type hints) to infer how to build them.
gen_yqm_offset = st.one_of(
*map(
st.from_type,
[
MonthBegin,
MonthEnd,
BMonthBegin,
BMonthEnd,
QuarterBegin,
QuarterEnd,
BQuarterBegin,
BQuarterEnd,
YearBegin,
YearEnd,
BYearBegin,
BYearEnd,
],
)
)
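# A conftest.py registration for one of these offset types might look roughly
# like this sketch (illustrative only; the real strategies live in conftest.py):
#
#     st.register_type_strategy(
#         MonthBegin,
#         st.builds(MonthBegin, n=st.integers(-5, 5), normalize=st.booleans()),
#     )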
# ----------------------------------------------------------------
# Offset-specific behaviour tests
@pytest.mark.arm_slow
@given(gen_random_datetime, gen_yqm_offset)
def test_on_offset_implementations(dt, offset):
assume(not offset.normalize)
# check that the class-specific implementations of is_on_offset match
# the general case definition:
# (dt + offset) - offset == dt
try:
compare = (dt + offset) - offset
except pytz.NonExistentTimeError:
# dt + offset does not exist, assume(False) to indicate
# to hypothesis that this is not a valid test case
assume(False)
assert offset.is_on_offset(dt) == (compare == dt)
@given(gen_yqm_offset)
def test_shift_across_dst(offset):
# GH#18319 check that 1) timezone is correctly normalized and
# 2) that hour is not incorrectly changed by this normalization
assume(not offset.normalize)
# Note that dti includes a transition across DST boundary
dti = pd.date_range(
start="2017-10-30 12:00:00", end="2017-11-06", freq="D", tz="US/Eastern"
)
assert (dti.hour == 12).all() # we haven't screwed up yet
res = dti + offset
assert (res.hour == 12).all()
| bsd-3-clause |
petosegan/scikit-learn | sklearn/metrics/tests/test_classification.py | 28 | 53546 | from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, [(), ()]), 0)
assert_equal(accuracy_score(y1, y2, normalize=False), 1)
assert_equal(accuracy_score(y1, y1, normalize=False), 2)
assert_equal(accuracy_score(y2, y2, normalize=False), 2)
assert_equal(accuracy_score(y2, [(), ()], normalize=False), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
assert_dep_warning = partial(assert_warns, DeprecationWarning)
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings),
({'average': 'micro'}, assert_dep_warning)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
@ignore_warnings
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
"""Test handling of explicit additional (not in input) labels to PRF
"""
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
# No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
"""Test a subset of labels may be requested for PRF"""
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
# Duplicate values with precision-recall require a different
# processing than when computing the AUC of a ROC, because the
# precision-recall curve is a decreasing curve
# The following situation corresponds to a perfect
# test statistic, the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
# Here if we go from left to right in y_true, the 0 values are
# separated from the 1 values, so it appears that we've
# correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='macro')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
y1 = np.array([0] * 40 + [1] * 60)
y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
kappa = cohen_kappa_score(y1, y2)
assert_almost_equal(kappa, .348, decimal=3)
assert_equal(kappa, cohen_kappa_score(y2, y1))
# Add spurious labels and ignore them.
y1 = np.append(y1, [2] * 4)
y2 = np.append(y2, [2] * 4)
assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
# Multiclass example: Artstein and Poesio, Table 4.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
@ignore_warnings
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
# same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
# Test Precision Recall and F1 Score for multiclass classification task
# GH Issue #1296
# initialize data
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
pos_label=None,
average='weighted')
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='weighted'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
@ignore_warnings # sequence of sequences is deprecated
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
make_ml = make_multilabel_classification
_, y_true_ll = make_ml(n_features=1, n_classes=n_classes, random_state=0,
n_samples=n_samples)
_, y_pred_ll = make_ml(n_features=1, n_classes=n_classes, random_state=1,
n_samples=n_samples)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
lb = MultiLabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, [(), ()]), 1)
assert_equal(zero_one_loss(y2, [tuple(), (10, )]), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, np.logical_not(y2)), 1)
assert_equal(hamming_loss(y1, np.logical_not(y1)), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, [(), ()]), 0.75)
assert_equal(hamming_loss(y1, [tuple(), (10, )]), 0.625)
assert_almost_equal(hamming_loss(y2, [tuple(), (10, )],
classes=np.arange(11)), 0.1818, 2)
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, [(), ()]), 0)
# |y3 inter y4 | = [0, 1, 1]
# |y3 union y4 | = [2, 1, 3]
y3 = [(0,), (1,), (3,)]
y4 = [(4,), (4,), (5, 6)]
assert_almost_equal(jaccard_similarity_score(y3, y4), 0)
# |y5 inter y6 | = [0, 1, 1]
# |y5 union y6 | = [2, 1, 3]
y5 = [(0,), (1,), (2, 3)]
y6 = [(1,), (1,), (2, 0)]
assert_almost_equal(jaccard_similarity_score(y5, y6), (1 + 1 / 3) / 3)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true_ll = [(0,), (1,), (2, 3)]
y_pred_ll = [(1,), (1,), (2, 0)]
lb = LabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
#tp = [0, 1, 1, 0]
#fn = [1, 0, 0, 1]
#fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check weighted
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true_ll = [(1,), (2,), (2, 3)]
y_pred_ll = [(4,), (4,), (2, 1)]
lb = LabelBinarizer()
lb.fit([range(1, 5)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check weighted
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true_ll = [(1,), (0,), (2, 1,)]
y_pred_ll = [tuple(), (3,), (2, 1)]
lb = LabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
# single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
# Ensure warning if f1_score et al.'s average is implicit for multiclass
y_true = [1, 2, 3, 3]
y_pred = [1, 2, 3, 1]
y_true_bin = [0, 1, 1]
y_pred_bin = [0, 1, 0]
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
score_weighted = assert_no_warnings(metric, y_true, y_pred,
average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default')
# check binary passes without warning
assert_no_warnings(metric, y_true_bin, y_pred_bin)
# but binary with pos_label=None should behave like multiclass
score = assert_warns(DeprecationWarning, metric,
y_true_bin, y_pred_bin, pos_label=None)
score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
pos_label=None, average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default with '
'binary data and pos_label=None')
@ignore_warnings # sequence of sequences is deprecated
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
SEQ = 'multilabel-sequences'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(SEQ, [[2, 3], [1], [3]]),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(SEQ, SEQ): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(IND, SEQ): None,
(MC, SEQ): None,
(BIN, SEQ): None,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(SEQ, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(SEQ, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(SEQ, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, SEQ, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, 0.24],
[-2.36, -0.79, -0.27, 0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
# decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
| bsd-3-clause |
thjashin/tensorflow | tensorflow/examples/tutorials/input_fn/boston.py | 51 | 2709 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
"dis", "tax", "ptratio", "medv"]
FEATURES = ["crim", "zn", "indus", "nox", "rm",
"age", "dis", "tax", "ptratio"]
LABEL = "medv"
def input_fn(data_set):
feature_cols = {k: tf.constant(data_set[k].values) for k in FEATURES}
labels = tf.constant(data_set[LABEL].values)
return feature_cols, labels
def main(unused_argv):
# Load datasets
training_set = pd.read_csv("boston_train.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
test_set = pd.read_csv("boston_test.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Set of 6 examples for which to predict median house values
prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Feature cols
feature_cols = [tf.contrib.layers.real_valued_column(k)
for k in FEATURES]
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = tf.contrib.learn.DNNRegressor(feature_columns=feature_cols,
hidden_units=[10, 10],
model_dir="/tmp/boston_model")
# Fit
regressor.fit(input_fn=lambda: input_fn(training_set), steps=5000)
# Score accuracy
ev = regressor.evaluate(input_fn=lambda: input_fn(test_set), steps=1)
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))
# Print out predictions
y = regressor.predict(input_fn=lambda: input_fn(prediction_set))
# .predict() returns an iterator; convert to a list and print predictions
predictions = list(itertools.islice(y, 6))
print("Predictions: {}".format(str(predictions)))
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/shapes_and_collections/ellipse_collection.py | 1 | 1437 | """
==================
Ellipse Collection
==================
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import EllipseCollection
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
x = np.arange(10)
y = np.arange(15)
X, Y = np.meshgrid(x, y)
XY = np.hstack((X.ravel()[:, np.newaxis], Y.ravel()[:, np.newaxis]))
ww = X / 10.0
hh = Y / 15.0
aa = X * 9
fig, ax = plt.subplots()
ec = EllipseCollection(ww, hh, aa, units='x', offsets=XY,
transOffset=ax.transData)
ec.set_array((X + Y).ravel())
ax.add_collection(ec)
ax.autoscale_view()
ax.set_xlabel('X')
ax.set_ylabel('y')
cbar = plt.colorbar(ec)
cbar.set_label('X+Y')
pltshow(plt)
| mit |
pulinagrawal/nupic | examples/sp/sp_tutorial.py | 5 | 12502 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A simple tutorial that shows some features of the Spatial Pooler.
The following program has the purpose of presenting some
basic properties of the Spatial Pooler. It reproduces Figs.
5, 7 and 9 from this paper: http://arxiv.org/abs/1505.02142
To learn more about the Spatial Pooler have a look at BAMI:
http://numenta.com/biological-and-machine-intelligence/
or at its class reference in the NuPIC documentation:
http://numenta.org/docs/nupic/classnupic_1_1research_1_1spatial__pooler_1_1_spatial_pooler.html
The purpose of the Spatial Pooler is to create a sparse representation
of its inputs in such a way that similar inputs will be mapped to similar
sparse representations. Thus, the Spatial Pooler should exhibit some resilience
to noise in its input.
"""
import numpy as np
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from nupic.research.spatial_pooler import SpatialPooler as SP
def percentOverlap(x1, x2, size):
"""
Computes the percentage of overlap between vectors x1 and x2.
@param x1 (array) binary vector
@param x2 (array) binary vector
@param size (int) length of binary vectors
@return percentOverlap (float) percentage overlap between x1 and x2
"""
nonZeroX1 = np.count_nonzero(x1)
nonZeroX2 = np.count_nonzero(x2)
minX1X2 = min(nonZeroX1, nonZeroX2)
percentOverlap = 0
if minX1X2 > 0:
percentOverlap = float(np.dot(x1, x2))/float(minX1X2)
return percentOverlap
def corruptVector(vector, noiseLevel):
"""
Corrupts a binary vector by flipping each of its bits with probability noiseLevel.
@param vector (array) binary vector to be corrupted
@param noiseLevel (float) probability of flipping each bit of the vector.
"""
size = len(vector)
for i in range(size):
rnd = random.random()
if rnd < noiseLevel:
if vector[i] == 1:
vector[i] = 0
else:
vector[i] = 1
def resetVector(x1, x2):
"""
Copies the contents of vector x1 into vector x2.
@param x1 (array) binary vector to be copied
@param x2 (array) binary vector where x1 is copied
"""
size = len(x1)
for i in range(size):
x2[i] = x1[i]
random.seed(1)
uintType = "uint32"
inputDimensions = (1000,1)
columnDimensions = (2048,1)
inputSize = np.array(inputDimensions).prod()
columnNumber = np.array(columnDimensions).prod()
inputArray = np.zeros(inputSize, dtype=uintType)
for i in range(inputSize):
inputArray[i] = random.randrange(2)
activeCols = np.zeros(columnNumber, dtype=uintType)
sp = SP(inputDimensions,
columnDimensions,
potentialRadius = int(0.5*inputSize),
numActiveColumnsPerInhArea = int(0.02*columnNumber),
globalInhibition = True,
seed = 1,
synPermActiveInc = 0.01,
synPermInactiveDec = 0.008
)
# Part 1:
# -------
# A column connects to a subset of the input vector (specified
# by both the potentialRadius and potentialPct). The overlap score
# for a column is the number of connections to the input that become
# active when presented with a vector. When learning is 'on' in the SP,
# the active connections are reinforced, whereas those inactive are
# depressed (according to parameters synPermActiveInc and synPermInactiveDec.
# In order for the SP to create a sparse representation of the input, it
# will select a small percentage (usually 2%) of its most active columns,
# ie. columns with the largest overlap score.
# In this first part, we will create a histogram showing the overlap scores
# of the Spatial Pooler (SP) after feeding it with a random binary
# input. As well, the histogram will show the scores of those columns
# that are chosen to build the sparse representation of the input.
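# With the parameters above, int(0.02 * 2048) = 40 columns survive inhibition,
# so each input ends up represented by roughly 2% of the 2048 columns.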
sp.compute(inputArray, False, activeCols)
overlaps = sp.getOverlaps()
activeColsScores = []
for i in activeCols.nonzero():
activeColsScores.append(overlaps[i])
print ""
print "---------------------------------"
print "Figure 1 shows an histogram of the overlap scores"
print "from all the columns in the spatial pooler, as well as the"
print "overlap scores of those columns that were selected to build a"
print "sparse representation of the input (shown in green)."
print "The SP chooses 2% of the columns with the largest overlap score"
print "to make such sparse representation."
print "---------------------------------"
print ""
bins = np.linspace(min(overlaps), max(overlaps), 28)
plt.hist(overlaps, bins, alpha=0.5, label='All cols')
plt.hist(activeColsScores, bins, alpha=0.5, label='Active cols')
plt.legend(loc='upper right')
plt.xlabel("Overlap scores")
plt.ylabel("Frequency")
plt.title("Figure 1: Column overlap of a SP with random input.")
plt.savefig("figure_1")
plt.close()
# Part 2a:
# -------
# The input overlap between two binary vectors is defined as their dot product. In order
# to normalize this value we divide by the minimum number of active inputs
# (in either vector). This means we are considering the sparser vector as reference.
# Two identical binary vectors will have an input overlap of 1, whereas two completely
# different vectors (one is the logical NOT of the other) will yield an overlap of 0.
# In this section we will see how the input overlap of two binary vectors decreases as we
# add noise to one of them.
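# In other words (added note): overlap(x1, x2) = (x1 . x2) / min(|x1|, |x2|),
# where |x| denotes the number of active (non-zero) bits of x, which is exactly
# what percentOverlap() above computes.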
inputX1 = np.zeros(inputSize, dtype=uintType)
inputX2 = np.zeros(inputSize, dtype=uintType)
outputX1 = np.zeros(columnNumber, dtype=uintType)
outputX2 = np.zeros(columnNumber, dtype=uintType)
for i in range(inputSize):
inputX1[i] = random.randrange(2)
x = []
y = []
for noiseLevel in np.arange(0, 1.1, 0.1):
resetVector(inputX1, inputX2)
corruptVector(inputX2, noiseLevel)
x.append(noiseLevel)
y.append(percentOverlap(inputX1, inputX2, inputSize))
print ""
print "---------------------------------"
print "Figure 2 shows the input overlap between 2 identical binary"
print "vectors in function of the noise applied to one of them."
print "0 noise level means that the vector remains the same, whereas"
print "1 means that the vector is the logical negation of the original"
print "vector."
print "The relationship between overlap and noise level is practically"
print "linear and monotonically decreasing."
print "---------------------------------"
print ""
plt.plot(x, y)
plt.xlabel("Noise level")
plt.ylabel("Input overlap")
plt.title("Figure 2: Input overlap between 2 identical vectors in function of noiseLevel.")
plt.savefig("figure_2")
plt.close()
# Part 2b:
# -------
# The output overlap between two binary input vectors is the overlap of the
# columns that become active once they are fed to the SP. In this part we
# turn learning off, and observe the output of the SP as we feed it two binary
# input vectors with varying levels of noise.
# Starting from two identical vectors (that yield the same active columns)
# we would expect that as we add noise to one of them their output overlap
# decreases.
# In this part we will show how the output overlap behaves as a function of
# the input overlap between the two vectors.
# Even with an untrained spatial pooler, we see some noise resilience.
# Note that due to the non-linear properties of high dimensional SDRs, overlaps
# greater than 10 bits, or 25% in this example, are considered significant.
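# (Added note: with numActiveColumnsPerInhArea = 40, a 25% output overlap
# corresponds to roughly 10 shared active columns, i.e. the 10-bit
# significance threshold mentioned above.)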
x = []
y = []
for noiseLevel in np.arange(0, 1.1, 0.1):
resetVector(inputX1, inputX2)
corruptVector(inputX2, noiseLevel)
sp.compute(inputX1, False, outputX1)
sp.compute(inputX2, False, outputX2)
x.append(percentOverlap(inputX1, inputX2, inputSize))
y.append(percentOverlap(outputX1, outputX2, columnNumber))
print ""
print "---------------------------------"
print "Figure 3 shows the output overlap between two sparse representations"
print "in function of their input overlap. Starting from two identical binary vectors"
print "(which yield the same active columns) we add noise two one of them"
print "feed it to the SP, and estimate the output overlap between the two"
print "representations in terms of the common active columns between them."
print "As expected, as the input overlap decrease, so does the output overlap."
print "---------------------------------"
print ""
plt.plot(x, y)
plt.xlabel("Input overlap")
plt.ylabel("Output overlap")
plt.title("Figure 3: Output overlap in function of input overlap in a SP without training")
plt.savefig("figure_3")
plt.close()
# Part 3:
# -------
# After training, an SP can become less sensitive to noise. For this purpose, we train the SP by
# turning learning on and exposing it to a variety of random binary vectors.
# We will expose the SP to repeated presentations of the same input patterns so that it learns to
# distinguish them once learning is over. This results in robustness to noise in the inputs.
# In this section we will reproduce the plot in the last section after the SP has learned a series
# of inputs. Here we will see how the SP exhibits increased resilience to noise after learning.
# We will present 10 random vectors to the SP, and repeat this 30 times.
# Later you can try changing the number of times we do this to see how it changes the last plot.
# Then, you could also modify the number of examples to see how the SP behaves.
# Is there a relationship between the number of examples and the number of times that
# we expose them to the SP?
numExamples = 10
inputVectors = np.zeros((numExamples, inputSize), dtype=uintType)
outputColumns = np.zeros((numExamples, columnNumber), dtype=uintType)
for i in range(numExamples):
for j in range(inputSize):
inputVectors[i][j] = random.randrange(2)
# This is the number of times that we will present the input vectors to the SP
epochs = 30
for _ in range(epochs):
for i in range(numExamples):
#Feed the examples to the SP
sp.compute(inputVectors[i][:], True, outputColumns[i][:])
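# Note (added): because the learn flag is True here, every presentation nudges
# the synapse permanences (by synPermActiveInc / synPermInactiveDec), so each
# column gradually tunes itself to the input patterns it tends to win for.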
inputVectorsCorrupted = np.zeros((numExamples, inputSize), dtype=uintType)
outputColumnsCorrupted = np.zeros((numExamples, columnNumber), dtype=uintType)
x = []
y = []
# We will repeat the experiment in the last section for only one input vector
# in the set of input vectors
for noiseLevel in np.arange(0, 1.1, 0.1):
resetVector(inputVectors[0][:], inputVectorsCorrupted[0][:])
corruptVector(inputVectorsCorrupted[0][:], noiseLevel)
sp.compute(inputVectors[0][:], False, outputColumns[0][:])
sp.compute(inputVectorsCorrupted[0][:], False, outputColumnsCorrupted[0][:])
x.append(percentOverlap(inputVectors[0][:], inputVectorsCorrupted[0][:], inputSize))
y.append(percentOverlap(outputColumns[0][:], outputColumnsCorrupted[0][:], columnNumber))
print ""
print "---------------------------------"
print "How robust is the SP to noise after learning?"
print "Figure 4 shows again the output overlap between two binary vectors in function"
print "of their input overlap. After training, the SP exhibits more robustness to noise"
print "in its input, resulting in a -almost- sigmoid curve. This implies that even if a"
print "previous input is presented again with a certain amount of noise its sparse"
print "representation still resembles its original."
print "---------------------------------"
print ""
plt.plot(x, y)
plt.xlabel("Input overlap")
plt.ylabel("Output overlap")
plt.title("Figure 4: Output overlap in function of input overlap in a SP after training")
plt.savefig("figure_4")
plt.close()
print ""
print "+++++++++++++++++++++++++++++++++++++++++++++++++++"
print " All images generated by this script will be saved"
print " in your current working directory."
print "+++++++++++++++++++++++++++++++++++++++++++++++++++"
print ""
| agpl-3.0 |
sharescience/ardupilot | libraries/AP_Math/tools/geodesic_grid/plot.py | 110 | 2876 | # Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import icosahedron as ico
import grid
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d(-2, 2)
ax.set_ylim3d(-2, 2)
ax.set_zlim3d(-2, 2)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.invert_zaxis()
ax.invert_xaxis()
ax.set_aspect('equal')
added_polygons = set()
added_sections = set()
def polygons(polygons):
for p in polygons:
polygon(p)
def polygon(polygon):
added_polygons.add(polygon)
def section(s):
added_sections.add(s)
def sections(sections):
for s in sections:
section(s)
def show(subtriangles=False):
polygons = []
facecolors = []
triangles_indexes = set()
subtriangle_facecolors = (
'#CCCCCC',
'#CCE5FF',
'#E5FFCC',
'#FFCCCC',
)
if added_sections:
subtriangles = True
for p in added_polygons:
try:
i = ico.triangles.index(p)
except ValueError:
polygons.append(p)
continue
if subtriangles:
sections(range(i * 4, i * 4 + 4))
else:
triangles_indexes.add(i)
polygons.append(p)
facecolors.append('#DDDDDD')
for s in added_sections:
triangles_indexes.add(int(s / 4))
subtriangle_index = s % 4
polygons.append(grid.section_triangle(s))
facecolors.append(subtriangle_facecolors[subtriangle_index])
ax.add_collection3d(Poly3DCollection(
polygons,
facecolors=facecolors,
edgecolors="#777777",
))
for i in triangles_indexes:
t = ico.triangles[i]
mx = my = mz = 0
for x, y, z in t:
mx += x
my += y
mz += z
ax.text(mx / 2.6, my / 2.6, mz / 2.6, i, color='#444444')
if subtriangles:
ax.legend(
handles=tuple(
mpatches.Patch(color=c, label='Sub-triangle #%d' % i)
for i, c in enumerate(subtriangle_facecolors)
),
)
plt.show()
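# Example usage (added; hypothetical session, not part of the original module):
#   import plot
#   import icosahedron as ico
#   plot.polygon(ico.triangles[0])   # register one icosahedron face
#   plot.sections(range(4))          # or register its four sub-triangle sections
#   plot.show(subtriangles=True)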
| gpl-3.0 |