repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values) |
---|---|---|---|---|---|
xavierwu/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
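# The docstring above describes the generative process behind
# make_multilabel_classification.  The helper below is a hedged, illustrative
# sketch of that process only -- it is *not* scikit-learn's implementation --
# and the parameter names (rng, n_classes, n_labels, length, theta, theta_c)
# are assumptions taken from the description.  It is never called here.
def _sketch_generate_document(rng, n_classes, n_labels, length, theta, theta_c):
    # n ~ Poisson(n_labels); the description adds a rejection step here
    n = max(1, rng.poisson(n_labels))
    # choose distinct classes c ~ Multinomial(theta), rejecting repeats
    classes = []
    while len(classes) < min(n, n_classes):
        c = rng.choice(n_classes, p=theta)
        if c not in classes:
            classes.append(c)
    # k ~ Poisson(length); zero-length documents are rejected
    k = max(1, rng.poisson(length))
    # k words, each w ~ Multinomial(theta_c) for one of the chosen classes
    words = [rng.choice(len(theta_c[c]), p=theta_c[c])
             for c in rng.choice(classes, size=k)]
    return classes, words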
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
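    # For a linear SVC the boundary satisfies w[0]*x + w[1]*y + intercept = 0,
    # i.e. y = -(w[0] / w[1]) * x - intercept / w[1]; the slope `a` and the
    # expression for `yy` below follow directly from that.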
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
khkaminska/scikit-learn | sklearn/cluster/dbscan_.py | 92 | 12380 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <[email protected]>
# Joel Nothman <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..metrics import pairwise_distances
from ..utils import check_array, check_consistent_length
from ..utils.fixes import astype
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski',
algorithm='auto', leaf_size=30, p=2, sample_weight=None,
random_state=None):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
    random_state : numpy.RandomState, optional
Deprecated and ignored as of version 0.16, will be removed in version
0.18. DBSCAN does not use random initialization.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
if random_state is not None:
warnings.warn("The parameter random_state is deprecated in 0.16 "
"and will be removed in version 0.18. "
"DBSCAN is deterministic except for rare border cases.",
category=DeprecationWarning)
X = check_array(X, accept_sparse='csr')
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
# Calculate neighborhood for all samples. This leaves the original point
# in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i. While true, it's useless information)
if metric == 'precomputed' and sparse.issparse(X):
neighborhoods = np.empty(X.shape[0], dtype=object)
X.sum_duplicates() # XXX: modifies X's internals in-place
X_mask = X.data <= eps
masked_indices = astype(X.indices, np.intp, copy=False)[X_mask]
masked_indptr = np.cumsum(X_mask)[X.indptr[1:] - 1]
# insert the diagonal: a point is its own neighbor, but 0 distance
# means absence from sparse matrix data
masked_indices = np.insert(masked_indices, masked_indptr,
np.arange(X.shape[0]))
masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
# split into rows
neighborhoods[:] = np.split(masked_indices, masked_indptr)
else:
neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, eps,
return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
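# Hedged usage sketch (not part of this module): the Notes above suggest
# precomputing a sparse neighborhood graph and passing it in with
# metric='precomputed'.  The helper below illustrates that; the variable names
# are assumptions, and NearestNeighbors is already imported at the top of this
# module.
def _example_precomputed_sparse_dbscan(X, eps=0.5, min_samples=5):
    neighbors_model = NearestNeighbors(radius=eps).fit(X)
    # mode='distance' keeps the distances so `eps` can still be applied
    distance_graph = neighbors_model.radius_neighbors_graph(X, mode='distance')
    return dbscan(distance_graph, eps=eps, min_samples=min_samples,
                  metric='precomputed')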
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
    random_state : numpy.RandomState, optional
Deprecated and ignored as of version 0.16, will be removed in version
0.18. DBSCAN does not use random initialization.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
algorithm='auto', leaf_size=30, p=None, random_state=None):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
self.random_state = random_state
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
"""
X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight, **self.get_params())
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
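# Hedged usage sketch (illustrative only, not library code): cluster a tiny toy
# dataset with the DBSCAN estimator defined above.  The data values are
# assumptions chosen for illustration; numpy is imported at the top of this
# module.
def _example_dbscan_usage():
    X = np.array([[1.0, 2.0], [2.0, 2.0], [2.0, 3.0],
                  [8.0, 7.0], [8.0, 8.0], [25.0, 80.0]])
    model = DBSCAN(eps=3, min_samples=2).fit(X)
    # labels_ holds one cluster id per sample; -1 marks noise
    return model.labels_, model.core_sample_indices_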
| bsd-3-clause |
potash/scikit-learn | examples/calibration/plot_calibration_multiclass.py | 95 | 6971 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
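# Each entry of calibrators_ is the sigmoid calibrator fitted for one class in
# a one-vs-rest fashion; below, each column of the simplex grid `p` is mapped
# through its class's calibrator and the rows are then renormalized to sum to 1.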
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
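# Hedged sketch (not CalibratedClassifierCV's internal code): sigmoid (Platt)
# calibration maps a raw score f to 1 / (1 + exp(a * f + b)), with a and b
# fitted on the hold-out set.  The default parameter values below are
# assumptions for illustration only.
def _sigmoid_calibration_sketch(raw_scores, a=-2.0, b=1.0):
    return 1.0 / (1.0 + np.exp(a * raw_scores + b))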
| bsd-3-clause |
jangorecki/h2o-3 | h2o-docs/src/booklets/v2_2015/source/python/python_randomized_grid_search.py | 2 | 5102 | In [57]: from sklearn.grid_search import RandomizedSearchCV
In [58]: from h2o.cross_validation import H2OKFold
In [59]: from h2o.model.regression import h2o_r2_score
In [60]: from sklearn.metrics.scorer import make_scorer
In [61]: from sklearn.metrics.scorer import make_scorer
# Parameters to test
In [62]: params = {"standardize__center": [True, False],
....: "standardize__scale": [True, False],
....: "pca__k": [2,3],
....: "gbm__ntrees": [10,20],
....: "gbm__max_depth": [1,2,3],
....: "gbm__learn_rate": [0.1,0.2]}
In [63]: custom_cv = H2OKFold(iris_df, n_folds=5, seed=42)
In [64]: pipeline = Pipeline([("standardize", H2OScaler()),
....: ("pca", H2OPCA(k=2)),
....: ("gbm", H2OGradientBoostingEstimator(distribution="gaussian"))])
In [65]: random_search = RandomizedSearchCV(pipeline, params,
....: n_iter=5,
....: scoring=make_scorer(h2o_r2_score),
....: cv=custom_cv,
....: random_state=42,
....: n_jobs=1)
In [66]: random_search.fit(iris_df[1:], iris_df[0])
Out[66]:
RandomizedSearchCV(cv=<h2o.cross_validation.H2OKFold instance at 0x10ba413d0>,
error_score='raise',
estimator=Pipeline(steps=[('standardize', <h2o.transforms.preprocessing.H2OScaler object at 0x10c0f18d0>), ('pca', ), ('gbm', )]),
fit_params={}, iid=True, n_iter=5, n_jobs=1,
param_distributions={'pca__k': [2, 3], 'gbm__ntrees': [10, 20], 'standardize__scale': [True, False], 'gbm__max_depth': [1, 2, 3], 'standardize__center': [True, False], 'gbm__learn_rate': [0.1, 0.2]},
pre_dispatch='2*n_jobs', random_state=42, refit=True,
scoring=make_scorer(h2o_r2_score), verbose=0)
In [67]: print random_search.best_estimator_
Model Details
=============
H2OPCA : Principal Component Analysis
Model Key: PCA_model_python_1446220160417_136
Importance of components:
pc1 pc2 pc3
---------------------- -------- ---------- ----------
Standard deviation 9.6974 0.091905 0.031356
Proportion of Variance 0.9999 8.98098e-05 1.04541e-05
Cumulative Proportion 0.9999 0.99999 1
ModelMetricsPCA: pca
** Reported on train data. **
MSE: NaN
RMSE: NaN
Model Details
=============
H2OGradientBoostingEstimator : Gradient Boosting Machine
Model Key: GBM_model_python_1446220160417_138
Model Summary:
number_of_trees number_of_internal_trees model_size_in_bytes min_depth max_depth mean_depth min_leaves max_leaves mean_leaves
-- ----------------- ------------------------- --------------------- ----------- ----------- ------------ ------------ ------------ -------------
20 20 2958 3 3 3 5 8 6.85
ModelMetricsRegression: gbm
** Reported on train data. **
RMSE: 0.193906262445
MAE: 0.155086582663
RMSLE: NaN
Mean Residual Deviance: 0.0375996386155
Scoring History:
timestamp duration number_of_trees training_rmse training_mse training_deviance
-- ------------------- ---------- ----------------- -------------- -------------- -------------------
2016-08-25 13:58:15 0.000 sec 0.0 0.683404046309 0.569341466973 0.467041090512
2016-08-25 13:58:15 0.002 sec 1.0 0.571086656306 0.469106400643 0.326139969011
2016-08-25 13:58:15 0.003 sec 2.0 0.483508601652 0.395952082872 0.233780567872
2016-08-25 13:58:15 0.004 sec 3.0 0.414549015095 0.339981133963 0.171850885916
2016-08-25 13:58:15 0.005 sec 4.0 0.362852508373 0.298212416346 0.131661942833
--- --- --- --- --- --- ---
2016-08-25 13:58:15 0.017 sec 16.0 0.204549491682 0.164292158112 0.0418404945473
2016-08-25 13:58:15 0.018 sec 17.0 0.201762323368 0.162030458841 0.0407080351307
2016-08-25 13:58:15 0.019 sec 18.0 0.199709571992 0.160735480674 0.0398839131454
2016-08-25 13:58:15 0.019 sec 19.0 0.196739590066 0.158067452484 0.0387064662994
2016-08-25 13:58:15 0.020 sec 20.0 0.193906262445 0.155086582663 0.0375996386155
Variable Importances:
variable relative_importance scaled_importance percentage
---------- --------------------- ------------------- ------------
PC1 160.092 1 0.894701
PC3 14.8175 0.0925562 0.08281
PC2 4.0241 0.0251361 0.0224893
Pipeline(steps=[('standardize', <h2o.transforms.preprocessing.H2OScaler object at 0x10c1679d0>), ('pca', ), ('gbm', )])
| apache-2.0 |
robertwb/incubator-beam | sdks/python/apache_beam/dataframe/expressions.py | 4 | 15055 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import random
import threading
from typing import Any
from typing import Callable
from typing import Generic
from typing import Iterable
from typing import Optional
from typing import TypeVar
from apache_beam.dataframe import partitionings
class Session(object):
"""A session represents a mapping of expressions to concrete values.
The bindings typically include required placeholders, but may be any
intermediate expression as well.
"""
def __init__(self, bindings=None):
self._bindings = dict(bindings or {})
def evaluate(self, expr): # type: (Expression) -> Any
if expr not in self._bindings:
self._bindings[expr] = expr.evaluate_at(self)
return self._bindings[expr]
def lookup(self, expr): # type: (Expression) -> Any
return self._bindings[expr]
class PartitioningSession(Session):
"""An extension of Session that enforces actual partitioning of inputs.
Each expression is evaluated multiple times for various supported
partitionings determined by its `requires_partition_by` specification. For
each tested partitioning, the input is partitioned and the expression is
evaluated on each partition separately, as if this were actually executed in
a parallel manner.
For each input partitioning, the results are verified to be partitioned
appropriately according to the expression's `preserves_partition_by`
specification.
For testing only.
"""
def evaluate(self, expr):
import pandas as pd
import collections
def is_scalar(expr):
return not isinstance(expr.proxy(), pd.core.generic.NDFrame)
if expr not in self._bindings:
if is_scalar(expr) or not expr.args():
result = super(PartitioningSession, self).evaluate(expr)
else:
scaler_args = [arg for arg in expr.args() if is_scalar(arg)]
def evaluate_with(input_partitioning):
parts = collections.defaultdict(
lambda: Session({arg: self.evaluate(arg)
for arg in scaler_args}))
for arg in expr.args():
if not is_scalar(arg):
input = self.evaluate(arg)
for key, part in input_partitioning.test_partition_fn(input):
parts[key]._bindings[arg] = part
if not parts:
parts[None] # Create at least one entry.
results = []
for session in parts.values():
if any(len(session.lookup(arg)) for arg in expr.args()
if not is_scalar(arg)):
results.append(session.evaluate(expr))
expected_output_partitioning = output_partitioning(
expr, input_partitioning)
if not expected_output_partitioning.check(results):
raise AssertionError(
f"""Expression does not preserve partitioning!
Expression: {expr}
Requires: {expr.requires_partition_by()}
Preserves: {expr.preserves_partition_by()}
Input partitioning: {input_partitioning}
Expected output partitioning: {expected_output_partitioning}
""")
if results:
return pd.concat(results)
else:
# Choose any single session.
return next(iter(parts.values())).evaluate(expr)
# Store random state so it can be re-used for each execution, in case
# the expression is part of a test that relies on the random seed.
random_state = random.getstate()
result = None
# Run with all supported partitionings s.t. the smallest subpartitioning
# is used last. This way the final result is computed with the most
# challenging partitioning. Avoids heisenbugs where sometimes the result
# is computed trivially with Singleton partitioning and passes.
for input_partitioning in sorted(set([expr.requires_partition_by(),
partitionings.Arbitrary(),
partitionings.Index(),
partitionings.Singleton()])):
if not expr.requires_partition_by().is_subpartitioning_of(
input_partitioning):
continue
random.setstate(random_state)
result = evaluate_with(input_partitioning)
assert result is not None
self._bindings[expr] = result
return self._bindings[expr]
# The return type of an Expression
T = TypeVar('T')
def output_partitioning(expr, input_partitioning):
""" Return the expected output partitioning for `expr` when it's input is
partitioned by `input_partitioning`.
For internal use only; No backward compatibility guarantees """
assert expr.requires_partition_by().is_subpartitioning_of(input_partitioning)
if expr.preserves_partition_by().is_subpartitioning_of(input_partitioning):
return min(input_partitioning, expr.preserves_partition_by())
else:
return partitionings.Arbitrary()
class Expression(Generic[T]):
"""An expression is an operation bound to a set of arguments.
An expression represents a deferred tree of operations, which can be
evaluated at a specific bindings of root expressions to values.
requires_partition_by indicates the upper bound of a set of partitionings that
are acceptable inputs to this expression. The expression should be able to
produce the correct result when given input(s) partitioned by its
  requires_partition_by attribute, or by any partitioning that is _not_
a subpartitioning of it.
preserves_partition_by indicates the upper bound of a set of partitionings
that can be preserved by this expression. When the input(s) to this expression
are partitioned by preserves_partition_by, or by any partitioning that is
_not_ a subpartitioning of it, this expression should produce output(s)
partitioned by the same partitioning.
However, if the partitioning of an expression's input is a subpartitioning of
the partitioning that it preserves, the output is presumed to have no
particular partitioning (i.e. Arbitrary()).
For example, let's look at an "element-wise operation", that has no
partitioning requirement, and preserves any partitioning given to it::
requires_partition_by = Arbitrary() -----------------------------+
|
+-----------+-------------+---------- ... ----+---------|
| | | | |
Singleton() < Index([i]) < Index([i, j]) < ... < Index() < Arbitrary()
| | | | |
+-----------+-------------+---------- ... ----+---------|
|
preserves_partition_by = Arbitrary() ----------------------------+
As a more interesting example, consider this expression, which requires Index
partitioning, and preserves just Singleton partitioning::
requires_partition_by = Index() -----------------------+
|
+-----------+-------------+---------- ... ----|
| | | |
Singleton() < Index([i]) < Index([i, j]) < ... < Index() < Arbitrary()
|
|
preserves_partition_by = Singleton()
Note that any non-Arbitrary partitioning is an acceptable input for this
expression. However, unless the inputs are Singleton-partitioned, the
expression makes no guarantees about the partitioning of the output.
"""
def __init__(self, name: str, proxy: T, _id: Optional[str] = None):
self._name = name
self._proxy = proxy
# Store for preservation through pickling.
self._id = _id or '%s_%s_%s' % (name, type(proxy).__name__, id(self))
def proxy(self) -> T:
return self._proxy
def __hash__(self):
return hash(self._id)
def __eq__(self, other):
return self._id == other._id
def __repr__(self):
return '%s[%s]' % (self.__class__.__name__, self._id)
def placeholders(self):
"""Returns all the placeholders that self depends on."""
raise NotImplementedError(type(self))
def evaluate_at(self, session: Session) -> T:
"""Returns the result of self with the bindings given in session."""
raise NotImplementedError(type(self))
def requires_partition_by(self) -> partitionings.Partitioning:
"""Returns the partitioning, if any, require to evaluate this expression.
Returns partitioning.Arbitrary() to require no partitioning is required.
"""
raise NotImplementedError(type(self))
def preserves_partition_by(self) -> partitionings.Partitioning:
"""Returns the partitioning, if any, preserved by this expression.
    This gives an upper bound on the partitioning of its output. The actual
partitioning of the output may be less strict (e.g. if the input was
less partitioned).
"""
raise NotImplementedError(type(self))
class PlaceholderExpression(Expression):
"""An expression whose value must be explicitly bound in the session."""
def __init__(
self, # type: PlaceholderExpression
proxy, # type: T
reference=None, # type: Any
):
"""Initialize a placeholder expression.
Args:
proxy: A proxy object with the type expected to be bound to this
expression. Used for type checking at pipeline construction time.
"""
super(PlaceholderExpression, self).__init__('placeholder', proxy)
self._reference = reference
def placeholders(self):
return frozenset([self])
def args(self):
return ()
def evaluate_at(self, session):
return session.lookup(self)
def requires_partition_by(self):
return partitionings.Arbitrary()
def preserves_partition_by(self):
return partitionings.Index()
class ConstantExpression(Expression):
"""An expression whose value is known at pipeline construction time."""
def __init__(
self, # type: ConstantExpression
value, # type: T
proxy=None # type: Optional[T]
):
"""Initialize a constant expression.
Args:
value: The constant value to be produced by this expression.
proxy: (Optional) a proxy object with same type as `value` to use for
rapid type checking at pipeline construction time. If not provided,
`value` will be used directly.
"""
if proxy is None:
proxy = value
super(ConstantExpression, self).__init__('constant', proxy)
self._value = value
def placeholders(self):
return frozenset()
def args(self):
return ()
def evaluate_at(self, session):
return self._value
def requires_partition_by(self):
return partitionings.Arbitrary()
def preserves_partition_by(self):
return partitionings.Arbitrary()
class ComputedExpression(Expression):
"""An expression whose value must be computed at pipeline execution time."""
def __init__(
self, # type: ComputedExpression
name, # type: str
func, # type: Callable[...,T]
args, # type: Iterable[Expression]
proxy=None, # type: Optional[T]
_id=None, # type: Optional[str]
requires_partition_by=partitionings.Index(), # type: partitionings.Partitioning
preserves_partition_by=partitionings.Singleton(), # type: partitionings.Partitioning
):
"""Initialize a computed expression.
Args:
name: The name of this expression.
func: The function that will be used to compute the value of this
expression. Should accept arguments of the types returned when
evaluating the `args` expressions.
args: The list of expressions that will be used to produce inputs to
`func`.
proxy: (Optional) a proxy object with same type as the objects that this
ComputedExpression will produce at execution time. If not provided, a
proxy will be generated using `func` and the proxies of `args`.
_id: (Optional) a string to uniquely identify this expression.
requires_partition_by: The required (common) partitioning of the args.
preserves_partition_by: The level of partitioning preserved.
"""
if (not _get_allow_non_parallel() and
isinstance(requires_partition_by, partitionings.Singleton)):
reason = requires_partition_by.reason or (
f"Encountered non-parallelizable form of {name!r}.")
raise NonParallelOperation(
f"{reason}\n"
"Consider using an allow_non_parallel_operations block if you're "
"sure you want to do this. See "
"https://s.apache.org/dataframe-non-parallel-operations for more "
"information.")
args = tuple(args)
if proxy is None:
proxy = func(*(arg.proxy() for arg in args))
super(ComputedExpression, self).__init__(name, proxy, _id)
self._func = func
self._args = args
self._requires_partition_by = requires_partition_by
self._preserves_partition_by = preserves_partition_by
def placeholders(self):
return frozenset.union(
frozenset(), *[arg.placeholders() for arg in self.args()])
def args(self):
return self._args
def evaluate_at(self, session):
return self._func(*(session.evaluate(arg) for arg in self._args))
def requires_partition_by(self):
return self._requires_partition_by
def preserves_partition_by(self):
return self._preserves_partition_by
def elementwise_expression(name, func, args):
return ComputedExpression(
name,
func,
args,
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary())
_ALLOW_NON_PARALLEL = threading.local()
_ALLOW_NON_PARALLEL.value = False
def _get_allow_non_parallel():
return _ALLOW_NON_PARALLEL.value
@contextlib.contextmanager
def allow_non_parallel_operations(allow=True):
if allow is None:
yield
else:
old_value, _ALLOW_NON_PARALLEL.value = _ALLOW_NON_PARALLEL.value, allow
yield
_ALLOW_NON_PARALLEL.value = old_value
class NonParallelOperation(Exception):
def __init__(self, msg):
super(NonParallelOperation, self).__init__(self, msg)
self.msg = msg
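# Hedged usage sketch (illustrative only, not part of the Beam API): build a
# tiny expression tree with the classes defined above and evaluate it against
# a Session that binds the placeholder.  Names prefixed with `_example` are
# assumptions.
def _example_expression_usage():
  x = PlaceholderExpression(proxy=0)  # concrete value supplied via the session
  seven = ConstantExpression(7)
  total = ComputedExpression(
      'add', lambda a, b: a + b, [x, seven],
      requires_partition_by=partitionings.Arbitrary(),
      preserves_partition_by=partitionings.Arbitrary())
  return Session({x: 35}).evaluate(total)  # -> 42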
| apache-2.0 |
shiqiangli/tushare | test/storing_test.py | 40 | 1729 | # -*- coding:utf-8 -*-
import os
from sqlalchemy import create_engine
from pandas.io.pytables import HDFStore
import tushare as ts
def csv():
df = ts.get_hist_data('000875')
df.to_csv('c:/day/000875.csv',columns=['open','high','low','close'])
def xls():
df = ts.get_hist_data('000875')
    # Save directly to an Excel file
df.to_excel('c:/day/000875.xlsx', startrow=2,startcol=5)
def hdf():
df = ts.get_hist_data('000875')
# df.to_hdf('c:/day/store.h5','table')
store = HDFStore('c:/day/store.h5')
store['000875'] = df
store.close()
def json():
df = ts.get_hist_data('000875')
df.to_json('c:/day/000875.json',orient='records')
    # Alternatively, use the JSON string directly
print(df.to_json(orient='records'))
def appends():
filename = 'c:/day/bigfile.csv'
for code in ['000875', '600848', '000981']:
df = ts.get_hist_data(code)
if os.path.exists(filename):
df.to_csv(filename, mode='a', header=None)
else:
df.to_csv(filename)
def db():
df = ts.get_tick_data('600848',date='2014-12-22')
engine = create_engine('mysql://root:[email protected]/mystock?charset=utf8')
# db = MySQLdb.connect(host='127.0.0.1',user='root',passwd='jimmy1',db="mystock",charset="utf8")
# df.to_sql('TICK_DATA',con=db,flavor='mysql')
# db.close()
df.to_sql('tick_data',engine,if_exists='append')
def nosql():
import pymongo
import json
conn = pymongo.Connection('127.0.0.1', port=27017)
df = ts.get_tick_data('600848',date='2014-12-22')
print(df.to_json(orient='records'))
conn.db.tickdata.insert(json.loads(df.to_json(orient='records')))
# print conn.db.tickdata.find()
if __name__ == '__main__':
nosql() | bsd-3-clause |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/linear_model/sag.py | 30 | 12959 | """Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import make_dataset
from .sag_fast import sag
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept,
n_samples=None,
is_saga=False):
"""Compute automatic step size for SAG solver
The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
    the max sum of squares over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : string, in {"log", "squared"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
n_samples : int, optional
Number of rows in X. Useful if is_saga=True.
is_saga : boolean, optional
Whether to return step size for the SAGA algorithm or the SAG
algorithm.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
Defazio, A., Bach F. & Lacoste-Julien S. (2014).
SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives
https://arxiv.org/abs/1407.0202
"""
if loss in ('log', 'multinomial'):
L = (0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled)
elif loss == 'squared':
# inverse Lipschitz constant for squared loss
L = max_squared_sum + int(fit_intercept) + alpha_scaled
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
if is_saga:
# SAGA theoretical step size is 1/3L or 1 / (2 * (L + mu n))
# See Defazio et al. 2014
mun = min(2 * n_samples * alpha_scaled, L)
step = 1. / (2 * L + mun)
else:
# SAG theoretical step size is 1/16L but it is recommended to use 1 / L
# see http://www.birs.ca//workshops//2014/14w5003/files/schmidt.pdf,
# slide 65
step = 1. / L
return step
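# Illustrative arithmetic for the formulas above (a hedged example, not library
# code): with max_squared_sum=4.0, alpha_scaled=0.01, loss='log' and
# fit_intercept=True, L = 0.25 * (4.0 + 1) + 0.01 = 1.26, so the SAG step size
# is 1 / 1.26 (about 0.794); for SAGA it would be 1 / (2 * L + mun) with
# mun = min(2 * n_samples * alpha_scaled, L).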
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1., beta=0.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=None,
is_saga=False):
"""SAG solver for Ridge and LogisticRegression
SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared' | 'multinomial'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, optional
Constant that multiplies the regularization term. Defaults to 1.
max_iter : int, optional
The max number of passes over the training data if the stopping
criteria is not reached. Defaults to 1000.
tol : double, optional
The stopping criteria for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol. Defaults to .001
verbose : integer, optional
The verbosity level.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem : dict, optional
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
is_saga : boolean, optional
Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
        better in the first epochs, and allows for l1 regularisation.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full pass on all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
solver='sag', tol=0.0001, verbose=0, warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
Defazio, A., Bach F. & Lacoste-Julien S. (2014).
SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives
https://arxiv.org/abs/1407.0202
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
beta_scaled = float(beta) / n_samples
# if loss == 'multinomial', y should be label encoded.
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
# assume fit_intercept is False
coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.shape[0] == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=np.float64)
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros((n_samples, n_classes),
dtype=np.float64, order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros((n_features, n_classes),
dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept, n_samples=n_samples,
is_saga=is_saga)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
num_seen, n_iter_ = sag(dataset, coef_init,
intercept_init, n_samples,
n_features, n_classes, tol,
max_iter,
loss,
step_size, alpha_scaled,
beta_scaled,
sum_gradient_init,
gradient_memory_init,
seen_init,
num_seen_init,
fit_intercept,
intercept_sum_gradient,
intercept_decay,
is_saga,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
if loss == 'multinomial':
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return coef_, n_iter_, warm_start_mem
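# Hedged usage sketch (not part of the library): call sag_solver directly on a
# small random regression problem.  The data shapes and seed are assumptions.
def _example_sag_solver():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 3)
    y = rng.randn(50)
    coef, n_iter, warm_start_mem = sag_solver(X, y, loss='squared', alpha=1.)
    # warm_start_mem can be fed back in to continue from this solution
    return coef, n_iter, warm_start_mem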
| mit |
mxlei01/healthcareai-py | healthcareai/common/randomized_search.py | 4 | 2043 | from sklearn.model_selection import RandomizedSearchCV
def get_algorithm(estimator,
scoring_metric,
hyperparameter_grid,
randomized_search,
number_iteration_samples=10,
**non_randomized_estimator_kwargs):
"""
Given an estimator and various params, initialize an algorithm with optional randomized search.
Args:
estimator (sklearn.base.BaseEstimator): a scikit-learn estimator (for example: KNeighborsClassifier)
scoring_metric (str): The scoring metric to optimized for if using random search. See
http://scikit-learn.org/stable/modules/model_evaluation.html
hyperparameter_grid (dict): An object containing key value pairs of the specific hyperparameter space to search
through.
randomized_search (bool): Whether the method should return a randomized search estimator (as opposed to a
simple algorithm).
number_iteration_samples (int): If performing randomized search, this is the number of samples that are run in
the hyperparameter space. Higher numbers will be slower, but end up with better results, since it is more
likely that the true optimal hyperparameter is found.
**non_randomized_estimator_kwargs: Keyword arguments that you can pass directly to the algorithm. Only used when
            randomized_search is False.
Returns:
sklearn.base.BaseEstimator: a scikit learn algorithm ready to `.fit()`
"""
if randomized_search:
algorithm = RandomizedSearchCV(estimator=estimator(),
scoring=scoring_metric,
param_distributions=hyperparameter_grid,
n_iter=number_iteration_samples,
verbose=0,
n_jobs=1)
else:
algorithm = estimator(**non_randomized_estimator_kwargs)
return algorithm
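# Hedged usage sketch (illustrative, not part of this module): wrap a
# scikit-learn KNeighborsClassifier in a randomized search over a small,
# assumed hyperparameter grid.
def _example_get_algorithm():
    from sklearn.neighbors import KNeighborsClassifier
    hyperparameters = {'n_neighbors': [1, 3, 5, 7],
                       'weights': ['uniform', 'distance']}
    return get_algorithm(KNeighborsClassifier,
                         scoring_metric='roc_auc',
                         hyperparameter_grid=hyperparameters,
                         randomized_search=True,
                         number_iteration_samples=5)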
| mit |
habi/GlobalDiagnostiX | Demonstrator/detect_lines_in_checkerboard.py | 1 | 2621 | # -*- coding: utf-8 -*-
"""
Script to detect horizontal and vertical lines on a sliding window along images
The script is based on the answers to
[this Stack Overflow question](http://stackoverflow.com/q/7227074/323100)
"""
from __future__ import division
import matplotlib
import matplotlib.pylab as plt
import os
import cv2
import math
BasePath = '/afs/psi.ch/project/EssentialMed/Images/DetectorElectronicsTests/' \
'EssentialLab/Valerie'
tcpIpPool = ['192.168.1.31', '192.168.1.32', '192.168.1.33', '192.168.1.34',
'192.168.1.35', '192.168.1.36', '192.168.1.37', '192.168.1.38',
'192.168.1.39', '192.168.1.40', '192.168.1.41', '192.168.1.42']
ImageName = tcpIpPool[0] + '.png'
plt.figure(figsize=[16, 9])
InputImage = cv2.imread(os.path.join(BasePath, ImageName))
RegionWidth = 200
for i in range(0, InputImage.shape[1] - RegionWidth, RegionWidth):
print i
plt.clf()
plt.suptitle(' '.join(['Region of size', str(RegionWidth),
'starting at px.', str(i)]))
img = InputImage[:, i:i + RegionWidth, :]
# img = cv2.GaussianBlur(img, (5, 5), 0)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 80, 120)
lines = cv2.HoughLinesP(cv2.flip(edges, flipCode=0), 1, math.pi / 2, 2,
None, 30, 1)
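    # HoughLinesP arguments above: rho=1 px, theta=pi/2 rad (so only horizontal
    # and vertical lines are detected), accumulator threshold=2, lines=None,
    # minLineLength=30 and maxLineGap=1.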
for line in lines[0]:
pt1 = (line[0], line[1])
pt2 = (line[2], line[3])
plt.subplot(141)
plt.imshow(InputImage, interpolation='none')
window = matplotlib.patches.Rectangle((i, 0), RegionWidth,
InputImage.shape[0], color='blue',
alpha=0.25)
plt.gca().add_patch(window)
plt.axvline(x=i)
plt.axvline(x=i + RegionWidth)
plt.title('Sliding window along image')
plt.subplot(142)
plt.imshow(gray, interpolation='none', cmap='gray')
plt.title('Region')
plt.subplot(143)
plt.imshow(edges, interpolation='none', cmap='gray')
plt.title('Detected Edges')
plt.subplot(144)
plt.imshow(cv2.flip(gray, flipCode=0), interpolation='none', cmap='gray')
for coordinates in lines[0]:
plt.plot([coordinates[0], coordinates[2]],
[coordinates[1], coordinates[3]], color='y', linestyle='-',
linewidth='5', alpha=0.5)
plt.plot([coordinates[0], coordinates[2]],
[coordinates[1], coordinates[3]], color='k', linestyle='-')
plt.xlim([0, img.shape[1]])
plt.ylim([0, img.shape[0]])
plt.title('Detected horizontal\nand vertical lines')
plt.pause(0.001)
plt.show()
| unlicense |
ChellyD65/patchSorter | SortPatches.py | 1 | 22884 | #!/usr/bin/env python
import sys, getopt, os
from math import sqrt
from PyQt4 import QtGui, QtCore
from PyQt4.QtGui import *
from PyQt4.QtCore import Qt, pyqtSlot, QMetaObject
import numpy as np
from matplotlib import image as mpimg
from scipy import ndimage
from scipy.misc import imsave
import skimage.color
import skimage.filters
import skimage.morphology
import skimage.feature
import skimage.draw
from skimage.util.shape import view_as_blocks
def toQImage(im):
result = QImage(im.ctypes.data, im.shape[1], im.shape[0], QImage.Format_RGB888)
return result
class ListEdit(QDialog):
def setupUi(self, TestLayout):
TestLayout.setObjectName("ListEdit")
TestLayout.resize(440,240)
self.centralwidget = QtGui.QWidget(TestLayout)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.editBox = QtGui.QLineEdit(self.centralwidget)
self.editBox.setObjectName("editBox")
self.verticalLayout.addWidget(self.editBox)
self.listBox = QtGui.QListWidget(self.centralwidget)
self.listBox.setObjectName("listBox")
self.verticalLayout.addWidget(self.listBox)
self.horizontalLayout.addLayout(self.verticalLayout)
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.addButton = QtGui.QPushButton(self.centralwidget)
self.addButton.setObjectName("addButton")
self.addButton.setText("Add")
self.verticalLayout_2.addWidget(self.addButton)
self.removeButton = QtGui.QPushButton(self.centralwidget)
self.removeButton.setObjectName("removeButton")
self.removeButton.setText("Remove")
self.verticalLayout_2.addWidget(self.removeButton)
self.clearButton = QtGui.QPushButton(self.centralwidget)
self.clearButton.setObjectName("clearButton")
self.clearButton.setText("Clear")
self.loadButton = QtGui.QPushButton(self.centralwidget)
self.loadButton.setObjectName("clearButton")
self.loadButton.setText("Load File")
self.verticalLayout_2.addWidget(self.loadButton)
self.verticalLayout_2.addWidget(self.clearButton)
spacerItem = QtGui.QSpacerItem(20,40,QtGui.QSizePolicy.Minimum,QtGui.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.okButton = QtGui.QPushButton(self.centralwidget)
self.okButton.setText("OK")
self.cancelButton = QtGui.QPushButton(self.centralwidget)
self.cancelButton.setText("Cancel")
self.horizontalLayout_2.addWidget(self.okButton)
self.horizontalLayout_2.addWidget(self.cancelButton)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.horizontalLayout.addLayout(self.verticalLayout_2)
QMetaObject.connectSlotsByName(TestLayout)
# connect buttons
self.connect(self.addButton, QtCore.SIGNAL("clicked()"), self.addToList)
self.connect(self.removeButton, QtCore.SIGNAL("clicked()"), self.removeFromList)
self.connect(self.clearButton, QtCore.SIGNAL("clicked()"), self.clearList)
self.connect(self.loadButton, QtCore.SIGNAL("clicked()"), self.loadListFile)
self.connect(self.okButton, QtCore.SIGNAL("clicked()"), self.ListUpdate)
self.connect(self.cancelButton, QtCore.SIGNAL("clicked()"), self.cancelListUpdate)
def addToList(self):
self.listBox.addItem(self.editBox.text())
self.editBox.clear()
def removeFromList(self):
for item in self.listBox.selectedItems():
self.listBox.takeItem(self.listBox.row(item))
def clearList(self):
self.listBox.clear()
def ListUpdate(self):
self.accept()
def cancelListUpdate(self):
self.close()
def getList(self):
return [str(i.text()) for i in self.listBox.findItems("", QtCore.Qt.MatchContains)]
def setList(self, listEntries):
for i in listEntries:
self.listBox.addItem(i)
def loadListFile(self):
classfile = QFileDialog.getOpenFileName(self, 'Open file', os.path.curdir)
if classfile:
self.clearList()
with open(classfile) as f:
lines = f.read().splitlines()
self.setList(lines)
class patchSorter(QMainWindow):
def __init__(self, inputfile, dims, outname):
super(patchSorter, self).__init__()
self.inputfile = inputfile
self.setupPatches(dims)
self.initUI(self.view,self.im,outname)
def setupPatches(self, dims):
self.dims = dims
self.im = mpimg.imread(self.inputfile)
self.block_shape = (self.dims[0], self.dims[1], self.im.shape[2]) #height, width
margin=np.mod(self.im.shape,self.block_shape)
self.im_crop = self.im[:(self.im.shape-margin)[0],:(self.im.shape-margin)[1],:(self.im.shape-margin)[2]]
self.view = view_as_blocks(self.im_crop, self.block_shape)
def initUI(self, view, mainimage, outname):
self.view = view
self.mainimage = mainimage
self.outname = outname
self.gui = mmdGUI(self)
self.gui.setupImages(self.view, self.mainimage, self.outname)
self.gui.setupInterface()
self.gui.unrandomize()
self.setCentralWidget(self.gui)
openAction = QAction('Open', self)
openAction.setShortcut('Ctrl+O')
openAction.setStatusTip('Open an image file')
openAction.triggered.connect(self.fileOpen)
exitAction = QAction(QIcon('exit.png'), '&Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(qApp.quit)
setclassAction = QAction('Set Classes', self)
setclassAction.setShortcut('Ctrl+C')
setclassAction.setStatusTip('Set the name of the Class Buttons')
setclassAction.triggered.connect(self.setClasses)
setpatchsizeAction = QAction('Set Patch Size', self)
setpatchsizeAction.setShortcut('Ctrl+P')
setpatchsizeAction.setStatusTip('Set the edge size of the square patches')
setpatchsizeAction.triggered.connect(self.setPatchSize)
randomizeAction = QAction('Randomize Order', self)
randomizeAction.triggered.connect(self.gui.randomize)
unrandomizeAction = QAction('Unrandomize Order', self)
unrandomizeAction.triggered.connect(self.gui.unrandomize)
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(openAction)
fileMenu.addAction(exitAction)
editMenu = menubar.addMenu('&Edit')
editMenu.addAction(setclassAction)
editMenu.addAction(setpatchsizeAction)
optionMenu = menubar.addMenu('&Options')
optionMenu.addAction(randomizeAction)
optionMenu.addAction(unrandomizeAction)
self.setWindowTitle("Patch sorter")
self.resize(1000,800)
self.show()
def fileOpen(self):
reply = QMessageBox.question(self, "Open a new main image file?",
"This will restart labeling. Continue?",
QMessageBox.Yes | QMessageBox.No )
if reply == QMessageBox.Yes:
newImageFile = QFileDialog.getOpenFileName(self, 'Open file', os.path.curdir)
if newImageFile:
self.inputfile = str(newImageFile)
self.setupPatches(self.dims)
self.gui.setupImages(self.view, self.im, self.outname)
self.gui.labelPatchNum.setText("("+str(self.gui.i+1)+"/"+str(self.gui.viewlist.shape[0])+")")
self.gui.patchsizeStatus.setText("Patch size: "+str(self.view.shape[3])+"x"+str(self.view.shape[4]))
self.gui.outname = os.path.splitext(os.path.abspath(self.inputfile))[0]
self.gui.updateClasses()
self.gui.i = 0
self.gui.thispatch = np.copy(self.gui.viewlist[self.gui.idxl[self.gui.i],:,:,:])
pixmap = QPixmap.fromImage(toQImage(self.gui.thispatch))
self.gui.labelPatch.setPixmap(pixmap.scaled(100, 100, Qt.KeepAspectRatio,Qt.SmoothTransformation))
self.gui.labelPatchNum.setText("("+str(self.gui.i+1)+"/"+str(self.gui.viewlist.shape[0])+")")
self.gui.showPatchLoc(self.gui.idxl[self.gui.i])
def setClasses(self):
reply = QMessageBox.question(self, "Set the class names?",
"This will set new classes and restart labeling, with output into new folders. Continue?",
QMessageBox.Yes | QMessageBox.No )
if reply == QMessageBox.Yes:
l = ListEdit()
l.setupUi(l)
l.setList(self.gui.classNames)
if(l.exec_()):
self.gui.classNames = l.getList()
self.gui.updateClasses()
else:
return 0
def setPatchSize(self):
reply = QMessageBox.question(self, "Set the patch size?",
"This will set new edge lengths for the patched and restart labeling, with output into new folders. Continue?",
QMessageBox.Yes | QMessageBox.No )
if reply == QMessageBox.Yes:
text, ok = QInputDialog.getText(self, 'Patch Edge Length',
            'Enter the Patch Edge Length in Pixels (it will be rounded up to the nearest multiple of 8):')
if ok:
if str(text).isdigit():
newPatchSize = int(str(text))
newPatchSize = int(np.ceil(newPatchSize/8.0) * 8) # must be multiple of 8
dims = [newPatchSize, newPatchSize]
self.setupPatches(dims)
self.gui.setupImages(self.view, self.im, self.outname)
self.gui.i = 0
self.gui.showPatchLoc(self.gui.idxl[self.gui.i])
self.gui.labelPatchNum.setText("("+str(self.gui.i+1)+"/"+str(self.gui.viewlist.shape[0])+")")
self.gui.patchsizeStatus.setText("Patch size: "+str(self.view.shape[3])+"x"+str(self.view.shape[4]))
else:
reply = QMessageBox.warning(self,"Invalid patch edge length","Value must be an integer.")
class mmdGUI(QFrame):
###
# Create GUI (requires Qt4)
###
def __init__(self, parent):
super(mmdGUI,self).__init__(parent)
self.PatchCursorColor = np.array([0,255,0])
self.wholeImageW = 600
self.wholeImageH = 600
self.outdirsExist = False
self.classNames = ("Interesting", "Boring", "Useless")
# Create the button click actions
@pyqtSlot()
def on_click(self, classname):
def calluser():
if not self.outdirsExist:
self.makeoutdirs()
f_out = os.path.join(self.outDirs[self.classNames.index(classname)], "patch_"+str(self.i)+".png")
imsave(f_out, self.thispatch)
self.i = self.i+1
if self.i <= self.idxl.shape[0]-1:
self.showCurrentPatch()
self.lastpickPatch.setText("Last: " + classname)
else:
msgBox = QMessageBox()
msgBox.setText("All patches have been classified. You're Done!")
msgBox.exec_()
sys.exit()
return calluser
@pyqtSlot()
def keyPressEvent(self, event):
if str(event.text()).isdigit():
ind = int(str(event.text())) - 1
if ind < len(self.classNames):
self.on_click(self.classNames[ind])()
def onResize(self, event):
winWidth = event.size().width()
winHeight = event.size().height()
self.wholeImageW = int(0.8*winWidth)
self.wholeImageH = int(0.8*winHeight)
self.showPatchLoc(self.idxl[self.i])
def showPatchLoc(self,patchNum):
tmpwhole = np.copy(self.wholeim)
m,n = np.unravel_index(patchNum,(self.view.shape[0],self.view.shape[1]))
# Coordinates for rectangle
rr1,cc1 = skimage.draw.line(m*self.view.shape[3],n*self.view.shape[4],m*self.view.shape[3],(n+1)*self.view.shape[4]-1)
rr11,cc11 = skimage.draw.line(m*self.view.shape[3]+1,n*self.view.shape[4],m*self.view.shape[3]+1,(n+1)*self.view.shape[4]-1)
rr12,cc12 = skimage.draw.line(m*self.view.shape[3]+2,n*self.view.shape[4],m*self.view.shape[3]+2,(n+1)*self.view.shape[4]-1)
rr = np.concatenate((rr1,rr11,rr12)); cc = np.concatenate((cc1,cc11,cc12))
skimage.draw.set_color(tmpwhole,(rr,cc),self.PatchCursorColor)
rr2,cc2 = skimage.draw.line(m*self.view.shape[3],n*self.view.shape[4],(m+1)*self.view.shape[3]-1,n*self.view.shape[4])
rr21,cc21 = skimage.draw.line(m*self.view.shape[3],n*self.view.shape[4]+1,(m+1)*self.view.shape[3]-1,n*self.view.shape[4]+1)
rr22,cc22 = skimage.draw.line(m*self.view.shape[3],n*self.view.shape[4]+2,(m+1)*self.view.shape[3]-1,n*self.view.shape[4]+2)
rr = np.concatenate((rr2,rr21,rr22)); cc = np.concatenate((cc2,cc21,cc22))
skimage.draw.set_color(tmpwhole,(rr,cc),self.PatchCursorColor)
rr3,cc3 = skimage.draw.line((m+1)*self.view.shape[3]-1,(n+1)*self.view.shape[4]-1,m*self.view.shape[3],(n+1)*self.view.shape[4]-1)
rr31,cc31 = skimage.draw.line((m+1)*self.view.shape[3]-1,(n+1)*self.view.shape[4]-2,m*self.view.shape[3],(n+1)*self.view.shape[4]-2)
rr32,cc32 = skimage.draw.line((m+1)*self.view.shape[3]-1,(n+1)*self.view.shape[4]-3,m*self.view.shape[3],(n+1)*self.view.shape[4]-3)
rr = np.concatenate((rr3,rr31,rr32)); cc = np.concatenate((cc3,cc31,cc32))
skimage.draw.set_color(tmpwhole,(rr,cc),self.PatchCursorColor)
rr4,cc4 = skimage.draw.line((m+1)*self.view.shape[3]-1,(n+1)*self.view.shape[4]-1,(m+1)*self.view.shape[3]-1,n*self.view.shape[4])
rr41,cc41 = skimage.draw.line((m+1)*self.view.shape[3]-2,(n+1)*self.view.shape[4]-1,(m+1)*self.view.shape[3]-2,n*self.view.shape[4])
rr42,cc42 = skimage.draw.line((m+1)*self.view.shape[3]-3,(n+1)*self.view.shape[4]-1,(m+1)*self.view.shape[3]-3,n*self.view.shape[4])
rr = np.concatenate((rr4,rr41,rr42)); cc = np.concatenate((cc4,cc41,cc42))
skimage.draw.set_color(tmpwhole,(rr,cc),self.PatchCursorColor)
pixmapWhole = QPixmap.fromImage(toQImage(tmpwhole))
self.labelWhole.setPixmap(pixmapWhole.scaled(self.wholeImageW, self.wholeImageH, Qt.KeepAspectRatio,Qt.SmoothTransformation))
def updateClasses(self):
for button in self.buttonList:
button.setParent(None)
self.bottom_area.setParent(None)
self.buttonList = []
buttoni = 1
for cname in self.classNames:
this_button = QPushButton(cname+" ("+str(buttoni)+")", self)
this_button.setFont(QFont("Arial",18, QFont.Bold))
this_button.resize(300,100)
this_button.clicked.connect(self.on_click(cname))
self.buttonList.append(this_button)
buttoni = buttoni+1
self.bottom_area = QHBoxLayout()
self.bottom_area.addStretch(1)
self.bottom_area.addWidget(self.labelPatch)
for button in self.buttonList:
self.bottom_area.addWidget(button)
self.bottom_area.addStretch(1)
if not isinstance(self.parent(), QMainWindow):
self.bottom_area.addWidget(btnq)
self.vbox.addLayout(self.bottom_area)
self.i = 0
self.outdirsExist = False
def makeoutdirs(self):
# Make the output directory
self.outDirs = []
for cname in self.classNames:
self.outDirs.append(self.outname + "_" + "".join(x for x in cname if x.isalnum()))
if not os.path.exists(self.outDirs[-1]):
os.makedirs(self.outDirs[-1])
print("Using output dirs:\n" + "\n".join(self.outDirs))
self.outdirsExist = True
def setupImages(self, view, wholeim, outname='patches_Output'):
self.view = view
self.wholeim = wholeim
self.viewlist = view.reshape(view.shape[0]*view.shape[1]*view.shape[2],view.shape[3],view.shape[4],view.shape[5])
self.outname = outname
self.idxl = np.random.permutation(range(0,self.viewlist.shape[0]))
def showCurrentPatch(self):
self.thispatch = np.copy(self.viewlist[self.idxl[self.i],:,:,:])
pixmap = QPixmap.fromImage(toQImage(self.thispatch))
self.labelPatch.setPixmap(pixmap.scaled(100, 100, Qt.KeepAspectRatio,Qt.SmoothTransformation))
self.labelPatchNum.setText("("+str(self.i+1)+"/"+str(self.viewlist.shape[0])+")")
self.showPatchLoc(self.idxl[self.i])
def unrandomize(self):
self.idxl = np.concatenate([self.idxl[0:self.i],np.sort(self.idxl[self.i:])])
self.showCurrentPatch()
def randomize(self):
self.idxl = np.concatenate([self.idxl[0:self.i],np.random.permutation(self.idxl[self.i:])])
self.showCurrentPatch()
def setupInterface(self):
self.w = self
self.w.setWindowTitle("Patch sorter")
self.resizeEvent = self.onResize
self.resize(1170,1000)
# Widget for showing the whole image, with location box
self.labelWhole = QLabel(self)
# Widget for showing the current patch
self.labelPatch = QLabel(self.w)
self.labelPatch.setFrameStyle(QFrame.Panel)
# Create the label for showing the current patch number
self.labelPatchNum = QLabel(self)
self.labelPatchNum.resize(110,20)
self.labelPatchNum.setAlignment(Qt.AlignRight)
self.labelPatchNum.setText("(1/"+str(self.viewlist.shape[0])+")")
self.labelPatchNum.setFont(QFont("Arial",14, QFont.Bold))
self.lastpickPatch = QLabel(self)
self.lastpickPatch.resize(110,20)
self.lastpickPatch.setAlignment(Qt.AlignRight)
self.lastpickPatch.setText("Last: ")
self.lastpickPatch.setFont(QFont("Arial",14, QFont.Bold))
self.patchsizeStatus = QLabel(self)
self.patchsizeStatus.resize(110,20)
self.patchsizeStatus.setAlignment(Qt.AlignRight)
self.patchsizeStatus.setText("Patch size: "+str(self.view.shape[3])+"x"+str(self.view.shape[4]))
self.patchsizeStatus.setFont(QFont("Arial",14, QFont.Bold))
# Display area for the current whole image
self.i = 0
self.thispatch = self.viewlist[self.idxl[self.i],:,:,:]
pixmap = QPixmap.fromImage(toQImage(self.thispatch))
self.labelPatch.setPixmap(pixmap.scaled(100, 100, Qt.KeepAspectRatio,Qt.SmoothTransformation))
self.showPatchLoc(self.idxl[self.i])
# Add buttons
if not isinstance(self.parent(), QMainWindow):
btnq = QPushButton('Quit', self)
btnq.setToolTip('Click to quit!')
btnq.clicked.connect(exit)
btnq.resize(btnq.sizeHint())
self.buttonList = []
for cname in self.classNames:
this_button = QPushButton(cname, self)
this_button.setFont(QFont("Arial",18, QFont.Bold))
this_button.resize(300,100)
this_button.clicked.connect(self.on_click(cname))
self.buttonList.append(this_button)
# Set up the layout of the GUI
top_area = QHBoxLayout()
top_area.addStretch(1)
top_area.addWidget(self.labelWhole)
top_area.addStretch(1)
middle_area = QHBoxLayout()
middle_area.addStretch(1)
if isinstance(self.parent(), QMainWindow):
self.parent().statusBar().insertWidget(0,self.labelPatchNum)
self.parent().statusBar().insertWidget(0,self.patchsizeStatus)
self.parent().statusBar().insertPermanentWidget(1,self.lastpickPatch, stretch=200)
else:
middle_area.addWidget(self.labelPatchNum)
self.bottom_area = QHBoxLayout()
self.bottom_area.addStretch(1)
self.bottom_area.addWidget(self.labelPatch)
for button in self.buttonList:
self.bottom_area.addWidget(button)
self.bottom_area.addStretch(1)
if not isinstance(self.parent(), QMainWindow):
self.bottom_area.addWidget(btnq)
self.vbox = QVBoxLayout()
self.vbox.addLayout(top_area)
self.vbox.addLayout(middle_area)
self.vbox.addLayout(self.bottom_area)
self.setLayout(self.vbox)
# ----------------------------------------------------------------------------------------------------
# Main method
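# Example invocation (illustrative; the file names below are placeholders):
#   python SortPatches.py -i slide.png -o sorted_patches -c classes.txt -s 128,128
# This cuts slide.png into 128x128 patches and saves each labeled patch under a
# per-class folder derived from the output name and the class names in classes.txt.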
if __name__ == "__main__":
inputfile = "data/example.jpg" #default
outputdir = None
classfile = None
dim1 = 256 # default patch size is a 256x256 square
dim2 = 256
try:
opts, args = getopt.getopt(sys.argv[1:],"hi:o:c:s:",["inputfile=","outputdir=","classfile=","size="])
except getopt.GetoptError:
print 'python SortPatches.py -i <inputfile> -o <outputdir> -c <classfile> -s <patchheight>,<patchwidth>'
sys.exit(2)
for opt, arg in opts:
if opt in ("-i", "--inputfile"):
inputfile = arg
if opt in ("-o", "--outputdir"):
outputdir = arg
if opt in ("-c", "--classfile"):
classfile = arg
if opt in ("-s", "--size"):
serr = False
aarg = arg.split(',')
if len(aarg) == 2:
dim1 = float(aarg[0])
dim2 = float(aarg[1])
if(dim1 > 0):
dim1 = int(np.ceil(dim1/8.0) * 8)
else:
serr = True
if(dim2 > 0):
dim2 = int(np.ceil(dim2/8.0) * 8)
else:
serr = True
else:
serr = True
if serr:
print("Size argument (-s or --size) should be of the form m,n. Sizes must be >0 and are rounded to the nearest byte (multiple of 8).")
sys.exit(2)
if not outputdir:
outputdir = os.path.splitext(os.path.abspath(inputfile))[0]
app = QApplication([])
p = patchSorter(inputfile, [dim1, dim2], outname=outputdir)
if classfile:
with open(classfile) as f:
lines = f.read().splitlines()
p.gui.classNames = lines
p.gui.updateClasses()
sys.exit(app.exec_())
| gpl-2.0 |
billy-inn/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
BioMedIA/irtk-legacy | wrapping/cython/irtk/ext/chanvese.py | 5 | 9929 |
# http://www.creatis.insa-lyon.fr/~bernard/creaseg/
# http://ascratchpad.blogspot.com/2011/03/image-segmentation-using-active.html
#------------------------------------------------------------------------
# Region Based Active Contour Segmentation
#
# seg, phi, its = chanvese(I, init_mask, max_its, alpha, thresh, color, display)
#
# Inputs: I 2D image
# init_mask Initialization (1 = foreground, 0 = bg)
# max_its Number of iterations to run segmentation for
# alpha (optional) Weight of smoothing term
# higher = smoother. default = 0.2
# display (optional) displays intermediate outputs
# default = true
# default = False
# Outputs: seg Final segmentation mask (1=fg, 0=bg)
#
# Description: This code implements the paper: "Active Contours Without
# Edges" By Chan Vese. This is a nice way to segment images whose
# foregrounds and backgrounds are statistically different and homogeneous.
#
# Example:
# img = imread('tire.tif');
# m = zeros(size(img));
# m(33:33+117,44:44+128) = 1;
# seg = region_seg(img,m,500);
#
# Coded by: Shawn Lankton (www.shawnlankton.com)
#------------------------------------------------------------------------
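# A rough Python analogue of the MATLAB example above (illustrative only;
# 'tire.png' is a placeholder path and the mask rectangle is arbitrary):
#
#   import cv2
#   import numpy as np
#   img = cv2.imread('tire.png', 0)          # any 2D grayscale image
#   mask = np.zeros(img.shape)
#   mask[33:150, 44:172] = 1                 # rectangular initialization
#   seg, phi, its = chanvese(img, mask, max_its=500, alpha=0.2)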
import numpy as np
import scipy.ndimage as nd
import matplotlib.pyplot as plt
eps = np.finfo(np.float).eps
def chanvese(I,init_mask,max_its=200,alpha=0.2,thresh=0,color='r',display=False):
I = I.astype('float')
#-- Create a signed distance map (SDF) from mask
phi = mask2phi(init_mask)
if display:
plt.ion()
showCurveAndPhi(I, phi, color)
plt.savefig('levelset_start.pdf',bbox_inches='tight')
#--main loop
its = 0
stop = False
prev_mask = init_mask
c = 0
while (its < max_its and not stop):
# get the curve's narrow band
idx = np.flatnonzero( np.logical_and( phi <= 1.2, phi >= -1.2) )
if len(idx) > 0:
#-- intermediate output
if display:
if np.mod(its,50) == 0:
#set(ud.txtInfo1,'string',sprintf('iteration: %d',its),'color',[1 1 0]);
print 'iteration:', its
showCurveAndPhi(I, phi, color)
else:
if np.mod(its,10) == 0:
print 'iteration:', its
#set(ud.txtInfo1,'string',sprintf('iteration: %d',its),'color',[1 1 0]);
#drawnow;
#-- find interior and exterior mean
upts = np.flatnonzero(phi<=0) # interior points
vpts = np.flatnonzero(phi>0) # exterior points
u = np.sum(I.flat[upts])/(len(upts)+eps) # interior mean
v = np.sum(I.flat[vpts])/(len(vpts)+eps) # exterior mean
F = (I.flat[idx]-u)**2-(I.flat[idx]-v)**2 # force from image information
curvature = get_curvature(phi,idx) # force from curvature penalty
dphidt = F /np.max(np.abs(F)) + alpha*curvature # gradient descent to minimize energy
#-- maintain the CFL condition
dt = 0.45/(np.max(np.abs(dphidt))+eps)
#-- evolve the curve
phi.flat[idx] += dt*dphidt
#-- Keep SDF smooth
phi = sussman(phi, 0.5)
new_mask = phi<=0
c = convergence(prev_mask,new_mask,thresh,c)
if c <= 5:
its = its + 1
prev_mask = new_mask
else: stop = True
else:
break
#-- final output
if display:
showCurveAndPhi(I, phi, color)
#plt.savefig('levelset_end.pdf',bbox_inches='tight')
time.sleep(10)
#-- make mask from SDF
seg = phi<=0 #-- Get mask from levelset
return seg,phi,its
#---------------------------------------------------------------------
#---------------------------------------------------------------------
#-- AUXILIARY FUNCTIONS ----------------------------------------------
#---------------------------------------------------------------------
#---------------------------------------------------------------------
def bwdist(a):
"""
this is an intermediary function, 'a' has only True, False vals,
so we convert them into 0, 1 values -- in reverse. True is 0,
False is 1, distance_transform_edt wants it that way.
"""
return nd.distance_transform_edt(a == 0)
import time
#-- Displays the image with curve superimposed
def showCurveAndPhi(I, phi, color):
# subplot(numRows, numCols, plotNum)
#myplot = plt.subplot(121)
#fig, axes = plt.subplots()
#axes = myplot.axes
#axes.get_xaxis().set_visible(False)
#axes.get_yaxis().set_visible(False)
plt.clf()
plt.imshow(I, cmap='gray')
#plt.hold(True)
CS = plt.contour(phi, 0, colors=color)
plt.draw()
#plt.hold(False)
# myplot = plt.subplot(122)
# axes = myplot.axes
# axes.get_xaxis().set_visible(False)
# axes.get_yaxis().set_visible(False)
# plt.imshow(phi)
plt.draw()
#time.sleep(1)
def im2double(a):
a = a.astype('float')
a /= a.max()
return a
#-- converts a mask to a SDF
def mask2phi(init_a):
phi = bwdist(init_a)-bwdist(1-init_a)+im2double(init_a) -0.5
return phi
#-- compute curvature along SDF
def get_curvature(phi,idx):
dimy, dimx = phi.shape
yx = np.array([np.unravel_index(i, phi.shape)for i in idx]) # get subscripts
y = yx[:,0]
x = yx[:,1]
#-- get subscripts of neighbors
ym1 = y-1; xm1 = x-1; yp1 = y+1; xp1 = x+1;
#-- bounds checking
ym1[ym1<0] = 0; xm1[xm1<0] = 0;
yp1[yp1>=dimy]=dimy - 1; xp1[xp1>=dimx] = dimx - 1;
#-- get indexes for 8 neighbors
idup = np.ravel_multi_index( (yp1,x),phi.shape)
iddn = np.ravel_multi_index( (ym1,x),phi.shape)
idlt = np.ravel_multi_index( (y,xm1),phi.shape)
idrt = np.ravel_multi_index( (y,xp1),phi.shape)
idul = np.ravel_multi_index( (yp1,xm1),phi.shape)
idur = np.ravel_multi_index( (yp1,xp1),phi.shape)
iddl = np.ravel_multi_index( (ym1,xm1),phi.shape)
iddr = np.ravel_multi_index( (ym1,xp1),phi.shape)
#-- get central derivatives of SDF at x,y
phi_x = -phi.flat[idlt]+phi.flat[idrt]
phi_y = -phi.flat[iddn]+phi.flat[idup]
phi_xx = phi.flat[idlt]-2*phi.flat[idx]+phi.flat[idrt]
phi_yy = phi.flat[iddn]-2*phi.flat[idx]+phi.flat[idup]
phi_xy = (-0.25*phi.flat[iddl]-0.25*phi.flat[idur]
+0.25*phi.flat[iddr]+0.25*phi.flat[idul])
phi_x2 = phi_x**2
phi_y2 = phi_y**2
#-- compute curvature (Kappa)
curvature = ( ((phi_x2*phi_yy + phi_y2*phi_xx - 2*phi_x*phi_y*phi_xy)
                  / (phi_x2 + phi_y2 + eps)**1.5)
                  * (phi_x2 + phi_y2)**0.5 )   # float exponents avoid Python 2 integer division
return curvature
#-- level set re-initialization by the sussman method
def sussman(D, dt):
# forward/backward differences
a = D - shiftR(D) # backward
b = shiftL(D) - D # forward
c = D - shiftD(D) # backward
d = shiftU(D) - D # forward
a_p = a.copy(); a_n = a.copy(); # a+ and a-
b_p = b.copy(); b_n = b.copy();
c_p = c.copy(); c_n = c.copy();
d_p = d.copy(); d_n = d.copy();
a_p[a < 0] = 0
a_n[a > 0] = 0
b_p[b < 0] = 0
b_n[b > 0] = 0
c_p[c < 0] = 0
c_n[c > 0] = 0
d_p[d < 0] = 0
d_n[d > 0] = 0
dD = np.zeros(D.shape)
D_neg_ind = np.flatnonzero(D < 0)
D_pos_ind = np.flatnonzero(D > 0)
dD.flat[D_pos_ind] = np.sqrt( np.max( np.concatenate( ([a_p.flat[D_pos_ind]**2],
[b_n.flat[D_pos_ind]**2]) ),
axis=0
)
+ np.max( np.concatenate( ([c_p.flat[D_pos_ind]**2],
[d_n.flat[D_pos_ind]**2])),
axis=0
)
) - 1
dD.flat[D_neg_ind] = np.sqrt( np.max( np.concatenate( ([a_n.flat[D_neg_ind]**2],
[b_p.flat[D_neg_ind]**2])),
axis=0
)
+ np.max( np.concatenate( ([c_n.flat[D_neg_ind]**2],
[d_p.flat[D_neg_ind]**2]) ),
axis=0
)
) - 1
D = D - dt * sussman_sign(D) * dD
return D
#-- whole matrix derivatives
def shiftD(M):
return shiftR(M.transpose()).transpose()
def shiftL(M):
#shift = np.concatenate( (M[:,1:], np.zeros((M.shape[1],1))), axis=1 )
#shift = np.concatenate( (M[:,1:], M[:,-1]), axis=1 )
shift = M[:,range(1,M.shape[1])+[M.shape[1]-1]]
return shift
def shiftR(M):
#shift = np.concatenate( (np.zeros((M.shape[1],1)), M[:,:-1]), axis=1 )
#shift = np.concatenate( (M[:,0], M[:,:-1]), axis=1 )
shift = M[:,[0]+range(0,M.shape[1]-1)]
return shift
def shiftU(M):
return shiftL(M.transpose()).transpose()
def sussman_sign(D):
return D / np.sqrt(D**2 + 1)
# Convergence Test
def convergence(p_mask,n_mask,thresh,c):
diff = p_mask - n_mask
n_diff = np.sum(np.abs(diff))
if n_diff < thresh:
c = c + 1
else:
c = 0
return c
if __name__ == "__main__":
import cv2
img = cv2.imread("/home/kevin/Imperial/PhD/DATASETS/Training/positive/246_cropped_c/8.png_0022_0115_0117_0132_0132_0.png",0)
#img = nd.imread('sagittal8.png')
mask = np.zeros(img.shape)
mask[55:65,55:65] = 1
chanvese(img,mask,max_its=2000,display=True,alpha=0.1)
| bsd-3-clause |
EricSB/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/collections.py | 69 | 39876 | """
Classes for the efficient drawing of large collections of objects that
share most properties, e.g. a large number of line segments or
polygons.
The classes are not meant to be as flexible as their single element
counterparts (e.g. you may not be able to select all line styles) but
they are meant to be fast for common use cases (e.g. a bunch of solid
line segments)
"""
import copy, math, warnings
import numpy as np
from numpy import ma
import matplotlib as mpl
import matplotlib.cbook as cbook
import matplotlib.colors as _colors # avoid conflict with kwarg
import matplotlib.cm as cm
import matplotlib.transforms as transforms
import matplotlib.artist as artist
import matplotlib.backend_bases as backend_bases
import matplotlib.path as mpath
import matplotlib.mlab as mlab
class Collection(artist.Artist, cm.ScalarMappable):
"""
Base class for Collections. Must be subclassed to be usable.
All properties in a collection must be sequences or scalars;
if scalars, they will be converted to sequences. The
property of the ith element of the collection is::
prop[i % len(props)]
Keyword arguments and default values:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets).
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional. If
the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not None
(ie a call to set_array has been made), at draw time a call to
scalar mappable will be made to set the face colors.
"""
_offsets = np.array([], np.float_)
_transOffset = transforms.IdentityTransform()
_transforms = []
zorder = 1
def __init__(self,
edgecolors=None,
facecolors=None,
linewidths=None,
linestyles='solid',
antialiaseds = None,
offsets = None,
transOffset = None,
norm = None, # optional for ScalarMappable
cmap = None, # ditto
pickradius = 5.0,
urls = None,
**kwargs
):
"""
Create a Collection
%(Collection)s
"""
artist.Artist.__init__(self)
cm.ScalarMappable.__init__(self, norm, cmap)
self.set_edgecolor(edgecolors)
self.set_facecolor(facecolors)
self.set_linewidth(linewidths)
self.set_linestyle(linestyles)
self.set_antialiased(antialiaseds)
self.set_urls(urls)
self._uniform_offsets = None
self._offsets = np.array([], np.float_)
if offsets is not None:
offsets = np.asarray(offsets)
if len(offsets.shape) == 1:
offsets = offsets[np.newaxis,:] # Make it Nx2.
if transOffset is not None:
self._offsets = offsets
self._transOffset = transOffset
else:
self._uniform_offsets = offsets
self._pickradius = pickradius
self.update(kwargs)
def _get_value(self, val):
try: return (float(val), )
except TypeError:
if cbook.iterable(val) and len(val):
try: float(val[0])
except TypeError: pass # raise below
else: return val
raise TypeError('val must be a float or nonzero sequence of floats')
def _get_bool(self, val):
try: return (bool(val), )
except TypeError:
if cbook.iterable(val) and len(val):
try: bool(val[0])
except TypeError: pass # raise below
else: return val
raise TypeError('val must be a bool or nonzero sequence of them')
def get_paths(self):
raise NotImplementedError
def get_transforms(self):
return self._transforms
def get_datalim(self, transData):
transform = self.get_transform()
transOffset = self._transOffset
offsets = self._offsets
paths = self.get_paths()
if not transform.is_affine:
paths = [transform.transform_path_non_affine(p) for p in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
offsets = np.asarray(offsets, np.float_)
result = mpath.get_path_collection_extents(
transform.frozen(), paths, self.get_transforms(),
offsets, transOffset.frozen())
result = result.inverse_transformed(transData)
return result
def get_window_extent(self, renderer):
bbox = self.get_datalim(transforms.IdentityTransform())
#TODO:check to ensure that this does not fail for
#cases other than scatter plot legend
return bbox
def _prepare_points(self):
"""Point prep for drawing and hit testing"""
transform = self.get_transform()
transOffset = self._transOffset
offsets = self._offsets
paths = self.get_paths()
if self.have_units():
paths = []
for path in self.get_paths():
vertices = path.vertices
xs, ys = vertices[:, 0], vertices[:, 1]
xs = self.convert_xunits(xs)
ys = self.convert_yunits(ys)
paths.append(mpath.Path(zip(xs, ys), path.codes))
if len(self._offsets):
                xs = self.convert_xunits(self._offsets[:, 0])
                ys = self.convert_yunits(self._offsets[:, 1])
offsets = zip(xs, ys)
offsets = np.asarray(offsets, np.float_)
if not transform.is_affine:
paths = [transform.transform_path_non_affine(path) for path in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
return transform, transOffset, offsets, paths
def draw(self, renderer):
if not self.get_visible(): return
renderer.open_group(self.__class__.__name__)
self.update_scalarmappable()
clippath, clippath_trans = self.get_transformed_clip_path_and_affine()
if clippath_trans is not None:
clippath_trans = clippath_trans.frozen()
transform, transOffset, offsets, paths = self._prepare_points()
renderer.draw_path_collection(
transform.frozen(), self.clipbox, clippath, clippath_trans,
paths, self.get_transforms(),
offsets, transOffset,
self.get_facecolor(), self.get_edgecolor(), self._linewidths,
self._linestyles, self._antialiaseds, self._urls)
renderer.close_group(self.__class__.__name__)
def contains(self, mouseevent):
"""
Test whether the mouse event occurred in the collection.
Returns True | False, ``dict(ind=itemlist)``, where every
item in itemlist contains the event.
"""
if callable(self._contains): return self._contains(self,mouseevent)
if not self.get_visible(): return False,{}
transform, transOffset, offsets, paths = self._prepare_points()
ind = mpath.point_in_path_collection(
mouseevent.x, mouseevent.y, self._pickradius,
transform.frozen(), paths, self.get_transforms(),
offsets, transOffset, len(self._facecolors)>0)
return len(ind)>0,dict(ind=ind)
    def set_pickradius(self, pickradius): self._pickradius = pickradius
    def get_pickradius(self): return self._pickradius
def set_urls(self, urls):
if urls is None:
self._urls = [None,]
else:
self._urls = urls
def get_urls(self): return self._urls
def set_offsets(self, offsets):
"""
Set the offsets for the collection. *offsets* can be a scalar
or a sequence.
ACCEPTS: float or sequence of floats
"""
offsets = np.asarray(offsets, np.float_)
if len(offsets.shape) == 1:
offsets = offsets[np.newaxis,:] # Make it Nx2.
#This decision is based on how they are initialized above
if self._uniform_offsets is None:
self._offsets = offsets
else:
self._uniform_offsets = offsets
def get_offsets(self):
"""
Return the offsets for the collection.
"""
#This decision is based on how they are initialized above in __init__()
if self._uniform_offsets is None:
return self._offsets
else:
return self._uniform_offsets
def set_linewidth(self, lw):
"""
Set the linewidth(s) for the collection. *lw* can be a scalar
or a sequence; if it is a sequence the patches will cycle
through the sequence
ACCEPTS: float or sequence of floats
"""
if lw is None: lw = mpl.rcParams['patch.linewidth']
self._linewidths = self._get_value(lw)
def set_linewidths(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_lw(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_linestyle(self, ls):
"""
Set the linestyle(s) for the collection.
        ACCEPTS: ['solid' | 'dashed' | 'dashdot' | 'dotted' |
(offset, on-off-dash-seq) ]
"""
try:
dashd = backend_bases.GraphicsContextBase.dashd
if cbook.is_string_like(ls):
if ls in dashd:
dashes = [dashd[ls]]
elif ls in cbook.ls_mapper:
dashes = [dashd[cbook.ls_mapper[ls]]]
else:
raise ValueError()
elif cbook.iterable(ls):
try:
dashes = []
for x in ls:
if cbook.is_string_like(x):
if x in dashd:
dashes.append(dashd[x])
elif x in cbook.ls_mapper:
dashes.append(dashd[cbook.ls_mapper[x]])
else:
raise ValueError()
elif cbook.iterable(x) and len(x) == 2:
dashes.append(x)
else:
raise ValueError()
except ValueError:
if len(ls)==2:
dashes = ls
else:
raise ValueError()
else:
raise ValueError()
except ValueError:
raise ValueError('Do not know how to convert %s to dashes'%ls)
self._linestyles = dashes
def set_linestyles(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_dashes(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_antialiased(self, aa):
"""
Set the antialiasing state for rendering.
ACCEPTS: Boolean or sequence of booleans
"""
if aa is None:
aa = mpl.rcParams['patch.antialiased']
self._antialiaseds = self._get_bool(aa)
def set_antialiaseds(self, aa):
"""alias for set_antialiased"""
return self.set_antialiased(aa)
def set_color(self, c):
"""
Set both the edgecolor and the facecolor.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
"""
self.set_facecolor(c)
self.set_edgecolor(c)
def set_facecolor(self, c):
"""
Set the facecolor(s) of the collection. *c* can be a
matplotlib color arg (all patches have same color), or a
        sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
if c is None: c = mpl.rcParams['patch.facecolor']
self._facecolors_original = c
self._facecolors = _colors.colorConverter.to_rgba_array(c, self._alpha)
def set_facecolors(self, c):
"""alias for set_facecolor"""
return self.set_facecolor(c)
def get_facecolor(self):
return self._facecolors
get_facecolors = get_facecolor
def get_edgecolor(self):
if self._edgecolors == 'face':
return self.get_facecolors()
else:
return self._edgecolors
get_edgecolors = get_edgecolor
def set_edgecolor(self, c):
"""
Set the edgecolor(s) of the collection. *c* can be a
matplotlib color arg (all patches have same color), or a
        sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence.
If *c* is 'face', the edge color will always be the same as
the face color.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
if c == 'face':
self._edgecolors = 'face'
self._edgecolors_original = 'face'
else:
if c is None: c = mpl.rcParams['patch.edgecolor']
self._edgecolors_original = c
self._edgecolors = _colors.colorConverter.to_rgba_array(c, self._alpha)
def set_edgecolors(self, c):
"""alias for set_edgecolor"""
return self.set_edgecolor(c)
def set_alpha(self, alpha):
"""
        Set the alpha transparencies of the collection. *alpha* must be
a float.
ACCEPTS: float
"""
try: float(alpha)
except TypeError: raise TypeError('alpha must be a float')
else:
artist.Artist.set_alpha(self, alpha)
try:
self._facecolors = _colors.colorConverter.to_rgba_array(
self._facecolors_original, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
try:
if self._edgecolors_original != 'face':
self._edgecolors = _colors.colorConverter.to_rgba_array(
self._edgecolors_original, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
def get_linewidths(self):
return self._linewidths
get_linewidth = get_linewidths
def get_linestyles(self):
return self._linestyles
get_dashes = get_linestyle = get_linestyles
def update_scalarmappable(self):
"""
If the scalar mappable array is not none, update colors
from scalar data
"""
if self._A is None: return
if self._A.ndim > 1:
raise ValueError('Collections can only map rank 1 arrays')
if len(self._facecolors):
self._facecolors = self.to_rgba(self._A, self._alpha)
else:
self._edgecolors = self.to_rgba(self._A, self._alpha)
def update_from(self, other):
'copy properties from other to self'
artist.Artist.update_from(self, other)
self._antialiaseds = other._antialiaseds
self._edgecolors_original = other._edgecolors_original
self._edgecolors = other._edgecolors
self._facecolors_original = other._facecolors_original
self._facecolors = other._facecolors
self._linewidths = other._linewidths
self._linestyles = other._linestyles
self._pickradius = other._pickradius
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object defn
artist.kwdocd['Collection'] = """\
Valid Collection keyword arguments:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets)
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
"""
class QuadMesh(Collection):
"""
Class for the efficient drawing of a quadrilateral mesh.
A quadrilateral mesh consists of a grid of vertices. The
dimensions of this array are (*meshWidth* + 1, *meshHeight* +
1). Each vertex in the mesh has a different set of "mesh
coordinates" representing its position in the topology of the
mesh. For any values (*m*, *n*) such that 0 <= *m* <= *meshWidth*
and 0 <= *n* <= *meshHeight*, the vertices at mesh coordinates
(*m*, *n*), (*m*, *n* + 1), (*m* + 1, *n* + 1), and (*m* + 1, *n*)
form one of the quadrilaterals in the mesh. There are thus
(*meshWidth* * *meshHeight*) quadrilaterals in the mesh. The mesh
need not be regular and the polygons need not be convex.
    A quadrilateral mesh is represented by a (((*meshWidth* + 1) *
    (*meshHeight* + 1)) x 2) numpy array *coordinates*, where each row is
the *x* and *y* coordinates of one of the vertices. To define the
function that maps from a data point to its corresponding color,
use the :meth:`set_cmap` method. Each of these arrays is indexed in
row-major order by the mesh coordinates of the vertex (or the mesh
coordinates of the lower left vertex, in the case of the
colors).
For example, the first entry in *coordinates* is the
coordinates of the vertex at mesh coordinates (0, 0), then the one
at (0, 1), then at (0, 2) .. (0, meshWidth), (1, 0), (1, 1), and
so on.
"""
def __init__(self, meshWidth, meshHeight, coordinates, showedges, antialiased=True):
Collection.__init__(self)
self._meshWidth = meshWidth
self._meshHeight = meshHeight
self._coordinates = coordinates
self._showedges = showedges
self._antialiased = antialiased
self._paths = None
self._bbox = transforms.Bbox.unit()
self._bbox.update_from_data_xy(coordinates.reshape(
((meshWidth + 1) * (meshHeight + 1), 2)))
# By converting to floats now, we can avoid that on every draw.
self._coordinates = self._coordinates.reshape((meshHeight + 1, meshWidth + 1, 2))
self._coordinates = np.array(self._coordinates, np.float_)
def get_paths(self, dataTrans=None):
if self._paths is None:
self._paths = self.convert_mesh_to_paths(
self._meshWidth, self._meshHeight, self._coordinates)
return self._paths
#@staticmethod
def convert_mesh_to_paths(meshWidth, meshHeight, coordinates):
"""
Converts a given mesh into a sequence of
:class:`matplotlib.path.Path` objects for easier rendering by
backends that do not directly support quadmeshes.
This function is primarily of use to backend implementers.
"""
Path = mpath.Path
if ma.isMaskedArray(coordinates):
c = coordinates.data
else:
c = coordinates
points = np.concatenate((
c[0:-1, 0:-1],
c[0:-1, 1: ],
c[1: , 1: ],
c[1: , 0:-1],
c[0:-1, 0:-1]
), axis=2)
points = points.reshape((meshWidth * meshHeight, 5, 2))
return [Path(x) for x in points]
convert_mesh_to_paths = staticmethod(convert_mesh_to_paths)
def get_datalim(self, transData):
return self._bbox
def draw(self, renderer):
if not self.get_visible(): return
renderer.open_group(self.__class__.__name__)
transform = self.get_transform()
transOffset = self._transOffset
offsets = self._offsets
if self.have_units():
if len(self._offsets):
                xs = self.convert_xunits(self._offsets[:, 0])
                ys = self.convert_yunits(self._offsets[:, 1])
offsets = zip(xs, ys)
offsets = np.asarray(offsets, np.float_)
if self.check_update('array'):
self.update_scalarmappable()
clippath, clippath_trans = self.get_transformed_clip_path_and_affine()
if clippath_trans is not None:
clippath_trans = clippath_trans.frozen()
if not transform.is_affine:
coordinates = self._coordinates.reshape(
(self._coordinates.shape[0] *
self._coordinates.shape[1],
2))
coordinates = transform.transform(coordinates)
coordinates = coordinates.reshape(self._coordinates.shape)
transform = transforms.IdentityTransform()
else:
coordinates = self._coordinates
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
renderer.draw_quad_mesh(
transform.frozen(), self.clipbox, clippath, clippath_trans,
self._meshWidth, self._meshHeight, coordinates,
offsets, transOffset, self.get_facecolor(), self._antialiased,
self._showedges)
renderer.close_group(self.__class__.__name__)
class PolyCollection(Collection):
def __init__(self, verts, sizes = None, closed = True, **kwargs):
"""
*verts* is a sequence of ( *verts0*, *verts1*, ...) where
*verts_i* is a sequence of *xy* tuples of vertices, or an
equivalent :mod:`numpy` array of shape (*nv*, 2).
*sizes* is *None* (default) or a sequence of floats that
scale the corresponding *verts_i*. The scaling is applied
before the Artist master transform; if the latter is an identity
transform, then the overall scaling is such that if
*verts_i* specify a unit square, then *sizes_i* is the area
of that square in points^2.
If len(*sizes*) < *nv*, the additional values will be
taken cyclically from the array.
*closed*, when *True*, will explicitly close the polygon.
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self.set_verts(verts, closed)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def set_verts(self, verts, closed=True):
'''This allows one to delay initialization of the vertices.'''
if closed:
self._paths = []
for xy in verts:
if np.ma.isMaskedArray(xy):
if len(xy) and (xy[0] != xy[-1]).any():
xy = np.ma.concatenate([xy, [xy[0]]])
else:
xy = np.asarray(xy)
if len(xy) and (xy[0] != xy[-1]).any():
xy = np.concatenate([xy, [xy[0]]])
self._paths.append(mpath.Path(xy))
else:
self._paths = [mpath.Path(xy) for xy in verts]
def get_paths(self):
return self._paths
def draw(self, renderer):
if self._sizes is not None:
self._transforms = [
transforms.Affine2D().scale(
(np.sqrt(x) * self.figure.dpi / 72.0))
for x in self._sizes]
return Collection.draw(self, renderer)
class BrokenBarHCollection(PolyCollection):
"""
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
"""
def __init__(self, xranges, yrange, **kwargs):
"""
*xranges*
sequence of (*xmin*, *xwidth*)
*yrange*
*ymin*, *ywidth*
%(Collection)s
"""
ymin, ywidth = yrange
ymax = ymin + ywidth
verts = [ [(xmin, ymin), (xmin, ymax), (xmin+xwidth, ymax), (xmin+xwidth, ymin), (xmin, ymin)] for xmin, xwidth in xranges]
PolyCollection.__init__(self, verts, **kwargs)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
@staticmethod
def span_where(x, ymin, ymax, where, **kwargs):
"""
Create a BrokenBarHCollection to plot horizontal bars from
over the regions in *x* where *where* is True. The bars range
on the y-axis from *ymin* to *ymax*
A :class:`BrokenBarHCollection` is returned.
*kwargs* are passed on to the collection
"""
xranges = []
for ind0, ind1 in mlab.contiguous_regions(where):
xslice = x[ind0:ind1]
if not len(xslice):
continue
xranges.append((xslice[0], xslice[-1]-xslice[0]))
collection = BrokenBarHCollection(xranges, [ymin, ymax-ymin], **kwargs)
return collection
class RegularPolyCollection(Collection):
"""Draw a collection of regular polygons with *numsides*."""
_path_generator = mpath.Path.unit_regular_polygon
def __init__(self,
numsides,
rotation = 0 ,
sizes = (1,),
**kwargs):
"""
*numsides*
the number of sides of the polygon
*rotation*
the rotation of the polygon in radians
*sizes*
gives the area of the circle circumscribing the
regular polygon in points^2
%(Collection)s
Example: see :file:`examples/dynamic_collection.py` for
complete example::
offsets = np.random.rand(20,2)
facecolors = [cm.jet(x) for x in np.random.rand(20)]
black = (0,0,0,1)
collection = RegularPolyCollection(
numsides=5, # a pentagon
rotation=0, sizes=(50,),
facecolors = facecolors,
edgecolors = (black,),
linewidths = (1,),
offsets = offsets,
transOffset = ax.transData,
)
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self._numsides = numsides
self._paths = [self._path_generator(numsides)]
self._rotation = rotation
self.set_transform(transforms.IdentityTransform())
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def draw(self, renderer):
self._transforms = [
transforms.Affine2D().rotate(-self._rotation).scale(
(np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))
for x in self._sizes]
return Collection.draw(self, renderer)
def get_paths(self):
return self._paths
def get_numsides(self):
return self._numsides
def get_rotation(self):
return self._rotation
def get_sizes(self):
return self._sizes
class StarPolygonCollection(RegularPolyCollection):
"""
Draw a collection of regular stars with *numsides* points."""
_path_generator = mpath.Path.unit_regular_star
class AsteriskPolygonCollection(RegularPolyCollection):
"""
Draw a collection of regular asterisks with *numsides* points."""
_path_generator = mpath.Path.unit_regular_asterisk
class LineCollection(Collection):
"""
All parameters must be sequences or scalars; if scalars, they will
be converted to sequences. The property of the ith line
segment is::
prop[i % len(props)]
i.e., the properties cycle if the ``len`` of props is less than the
number of segments.
"""
zorder = 2
def __init__(self, segments, # Can be None.
linewidths = None,
colors = None,
antialiaseds = None,
linestyles = 'solid',
offsets = None,
transOffset = None,
norm = None,
cmap = None,
pickradius = 5,
**kwargs
):
"""
*segments*
a sequence of (*line0*, *line1*, *line2*), where::
linen = (x0, y0), (x1, y1), ... (xm, ym)
or the equivalent numpy array with two columns. Each line
can be a different length.
*colors*
must be a sequence of RGBA tuples (eg arbitrary color
strings, etc, not allowed).
*antialiaseds*
must be a sequence of ones or zeros
*linestyles* [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
a string or dash tuple. The dash tuple is::
(offset, onoffseq),
where *onoffseq* is an even length tuple of on and off ink
in points.
If *linewidths*, *colors*, or *antialiaseds* is None, they
default to their rcParams setting, in sequence form.
If *offsets* and *transOffset* are not None, then
*offsets* are transformed by *transOffset* and applied after
the segments have been transformed to display coordinates.
If *offsets* is not None but *transOffset* is None, then the
*offsets* are added to the segments before any transformation.
In this case, a single offset can be specified as::
offsets=(xo,yo)
and this value will be added cumulatively to each successive
segment, so as to produce a set of successively offset curves.
*norm*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*cmap*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*pickradius* is the tolerance for mouse clicks picking a line.
The default is 5 pt.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
If the :class:`~matplotlib.cm.ScalarMappable` matrix
:attr:`~matplotlib.cm.ScalarMappable._A` is not None (ie a call to
:meth:`~matplotlib.cm.ScalarMappable.set_array` has been made), at
draw time a call to scalar mappable will be made to set the colors.
"""
if colors is None: colors = mpl.rcParams['lines.color']
if linewidths is None: linewidths = (mpl.rcParams['lines.linewidth'],)
if antialiaseds is None: antialiaseds = (mpl.rcParams['lines.antialiased'],)
self.set_linestyles(linestyles)
colors = _colors.colorConverter.to_rgba_array(colors)
Collection.__init__(
self,
edgecolors=colors,
linewidths=linewidths,
linestyles=linestyles,
antialiaseds=antialiaseds,
offsets=offsets,
transOffset=transOffset,
norm=norm,
cmap=cmap,
pickradius=pickradius,
**kwargs)
self.set_facecolors([])
self.set_segments(segments)
def get_paths(self):
return self._paths
def set_segments(self, segments):
if segments is None: return
_segments = []
for seg in segments:
if not np.ma.isMaskedArray(seg):
seg = np.asarray(seg, np.float_)
_segments.append(seg)
if self._uniform_offsets is not None:
_segments = self._add_offsets(_segments)
self._paths = [mpath.Path(seg) for seg in _segments]
set_verts = set_segments # for compatibility with PolyCollection
def _add_offsets(self, segs):
offsets = self._uniform_offsets
Nsegs = len(segs)
Noffs = offsets.shape[0]
if Noffs == 1:
for i in range(Nsegs):
segs[i] = segs[i] + i * offsets
else:
for i in range(Nsegs):
io = i%Noffs
segs[i] = segs[i] + offsets[io:io+1]
return segs
def set_color(self, c):
"""
Set the color(s) of the line collection. *c* can be a
matplotlib color arg (all patches have same color), or a
        sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
self._edgecolors = _colors.colorConverter.to_rgba_array(c)
def color(self, c):
"""
Set the color(s) of the line collection. *c* can be a
matplotlib color arg (all patches have same color), or a
        sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
warnings.warn('LineCollection.color deprecated; use set_color instead')
return self.set_color(c)
def get_color(self):
return self._edgecolors
get_colors = get_color # for compatibility with old versions
class CircleCollection(Collection):
"""
A collection of circles, drawn using splines.
"""
def __init__(self, sizes, **kwargs):
"""
*sizes*
Gives the area of the circle in points^2
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self.set_transform(transforms.IdentityTransform())
self._paths = [mpath.Path.unit_circle()]
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def draw(self, renderer):
# sizes is the area of the circle circumscribing the polygon
# in points^2
self._transforms = [
transforms.Affine2D().scale(
(np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))
for x in self._sizes]
return Collection.draw(self, renderer)
def get_paths(self):
return self._paths
class EllipseCollection(Collection):
"""
A collection of ellipses, drawn using splines.
"""
def __init__(self, widths, heights, angles, units='points', **kwargs):
"""
*widths*: sequence
half-lengths of first axes (e.g., semi-major axis lengths)
*heights*: sequence
half-lengths of second axes
*angles*: sequence
angles of first axes, degrees CCW from the X-axis
*units*: ['points' | 'inches' | 'dots' | 'width' | 'height' | 'x' | 'y']
units in which majors and minors are given; 'width' and 'height'
refer to the dimensions of the axes, while 'x' and 'y'
refer to the *offsets* data units.
Additional kwargs inherited from the base :class:`Collection`:
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._widths = np.asarray(widths).ravel()
self._heights = np.asarray(heights).ravel()
self._angles = np.asarray(angles).ravel() *(np.pi/180.0)
self._units = units
self.set_transform(transforms.IdentityTransform())
self._transforms = []
self._paths = [mpath.Path.unit_circle()]
self._initialized = False
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def _init(self):
def on_dpi_change(fig):
self._transforms = []
self.figure.callbacks.connect('dpi_changed', on_dpi_change)
self._initialized = True
def set_transforms(self):
if not self._initialized:
self._init()
self._transforms = []
ax = self.axes
fig = self.figure
if self._units in ('x', 'y'):
if self._units == 'x':
dx0 = ax.viewLim.width
dx1 = ax.bbox.width
else:
dx0 = ax.viewLim.height
dx1 = ax.bbox.height
sc = dx1/dx0
else:
if self._units == 'inches':
sc = fig.dpi
elif self._units == 'points':
sc = fig.dpi / 72.0
elif self._units == 'width':
sc = ax.bbox.width
elif self._units == 'height':
sc = ax.bbox.height
elif self._units == 'dots':
sc = 1.0
else:
raise ValueError('unrecognized units: %s' % self._units)
_affine = transforms.Affine2D
for x, y, a in zip(self._widths, self._heights, self._angles):
trans = _affine().scale(x * sc, y * sc).rotate(a)
self._transforms.append(trans)
def draw(self, renderer):
if True: ###not self._transforms:
self.set_transforms()
return Collection.draw(self, renderer)
def get_paths(self):
return self._paths
class PatchCollection(Collection):
"""
A generic collection of patches.
This makes it easier to assign a color map to a heterogeneous
collection of patches.
This also may improve plotting speed, since PatchCollection will
draw faster than a large number of patches.
"""
def __init__(self, patches, match_original=False, **kwargs):
"""
*patches*
a sequence of Patch objects. This list may include
a heterogeneous assortment of different patch types.
*match_original*
If True, use the colors and linewidths of the original
patches. If False, new colors may be assigned by
providing the standard collection arguments, facecolor,
edgecolor, linewidths, norm or cmap.
If any of *edgecolors*, *facecolors*, *linewidths*,
*antialiaseds* are None, they default to their
:data:`matplotlib.rcParams` patch setting, in sequence form.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
If the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not
        None (i.e. a call to set_array has been made), at draw time a
call to scalar mappable will be made to set the face colors.
"""
if match_original:
def determine_facecolor(patch):
if patch.fill:
return patch.get_facecolor()
return [0, 0, 0, 0]
facecolors = [determine_facecolor(p) for p in patches]
edgecolors = [p.get_edgecolor() for p in patches]
            linewidths = [p.get_linewidth() for p in patches]
antialiaseds = [p.get_antialiased() for p in patches]
Collection.__init__(
self,
edgecolors=edgecolors,
facecolors=facecolors,
linewidths=linewidths,
linestyles='solid',
antialiaseds = antialiaseds)
else:
Collection.__init__(self, **kwargs)
paths = [p.get_transform().transform_path(p.get_path())
for p in patches]
self._paths = paths
def get_paths(self):
return self._paths
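# A minimal usage sketch for PatchCollection (illustrative): heterogeneous
# patches are drawn as one collection and colored through a shared
# colormap by assigning scalar values with ``set_array``.
def _patch_collection_sketch():
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.patches import Circle, Rectangle
    fig = plt.figure()
    ax = fig.add_subplot(111)
    patches = [Circle((0.3, 0.5), 0.2), Rectangle((0.6, 0.3), 0.3, 0.4)]
    coll = PatchCollection(patches, cmap=plt.cm.jet, edgecolors='black')
    coll.set_array(np.array([1.0, 2.0]))  # values mapped through the cmap
    ax.add_collection(coll)
    return fig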
artist.kwdocd['Collection'] = patchstr = artist.kwdoc(Collection)
for k in ('QuadMesh', 'PolyCollection', 'BrokenBarHCollection', 'RegularPolyCollection',
'StarPolygonCollection', 'PatchCollection', 'CircleCollection'):
artist.kwdocd[k] = patchstr
artist.kwdocd['LineCollection'] = artist.kwdoc(LineCollection)
| agpl-3.0 |
jakevdp/mpld3 | doc/sphinxext/plot_generator.py | 2 | 10649 | import sys
import os
import glob
import token
import tokenize
import shutil
import json
import matplotlib
matplotlib.use('Agg') # don't display plots
import mpld3
from matplotlib import image
from matplotlib.figure import Figure
class disable_mpld3(object):
"""Context manager to temporarily disable mpld3.show() command"""
def __enter__(self):
self.show = mpld3.show
mpld3.show = lambda *args, **kwargs: None
return self
def __exit__(self, type, value, traceback):
mpld3.show = self.show
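# A short usage sketch for the context manager above (illustrative): any
# mpld3.show() call inside the block becomes a no-op, which lets example
# scripts be exec'd during the docs build without blocking.
def _disable_mpld3_sketch():
    import matplotlib.pyplot as plt
    with disable_mpld3():
        plt.plot([0, 1], [0, 1])
        mpld3.show()  # silently ignored inside the context
    return plt.gcf()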
RST_TEMPLATE = """
.. _{sphinx_tag}:
{docstring}
.. raw:: html
{img_html}
**Python source code:** :download:`[download source: {fname}]<{fname}>`
.. literalinclude:: {fname}
:lines: {end_line}-
"""
INDEX_TEMPLATE = """
.. raw:: html
<style type="text/css">
.figure {{
float: left;
margin: 10px;
width: 180px;
height: 200px;
}}
.figure img {{
display: inline;
width: 170px;
height: 170px;
opacity:0.4;
filter:alpha(opacity=40); /* For IE8 and earlier */
}}
.figure img:hover
{{
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure .caption {{
width: 180px;
text-align: center !important;
}}
</style>
.. _{sphinx_tag}:
Example Gallery
===============
{toctree}
{contents}
.. raw:: html
<div style="clear: both"></div>
"""
BANNER_JS_TEMPLATE = """
var banner_data = {banner_data};
banner_data.forEach(function(d, i) {{
d.i = i;
}});
var height = 150,
width = 900,
imageHeight = 150,
imageWidth = 150,
zoomfactor = 0.1;
var banner = d3.select(".example-banner");
banner.style("height", height + "px")
.style("width", width + "px")
.style("margin-left", "auto")
.style("margin-right", "auto");
var svg = banner.append("svg")
.attr("width", width + "px")
.attr("height", height + "px");
var anchor = svg.append("g")
.attr("class", "example-anchor")
.selectAll("a")
.data(banner_data.slice(0, 7));
anchor.exit().remove();
var anchor_elements = anchor.enter().append("a")
.attr("xlink:href", function(d) {{ return d.url; }})
.attr("xlink:title", function(d) {{ return d.title; }});
anchor_elements.append("svg:image")
.attr("width", (1 - zoomfactor) * imageWidth)
.attr("height", (1 - zoomfactor) * imageHeight)
.attr("xlink:href", function(d){{ return d.thumb; }})
.attr("xroot", function(d){{return d3.round(imageWidth * (d.i - 0.5));}})
.attr("x", function(d){{return d3.round(imageWidth * (d.i - 0.5));}})
.attr("y", d3.round(0.5 * zoomfactor * imageHeight))
.attr("i", function(d){{return d.i;}})
.on("mouseover", function() {{
var img = d3.select(this);
img.transition()
.attr("width", imageWidth)
.attr("height", height)
.attr("x", img.attr("xroot")
- d3.round(0.5 * zoomfactor * imageWidth))
.attr("y", 0);
}})
.on("mouseout", function() {{
var img = d3.select(this);
img.transition()
.attr("width", (1 - zoomfactor) * imageWidth)
.attr("height", (1 - zoomfactor) * height)
.attr("x", img.attr("xroot"))
.attr("y", d3.round(0.5 * zoomfactor * imageHeight));
}});
"""
def create_thumbnail(infile, thumbfile,
width=300, height=300,
cx=0.5, cy=0.6, border=4):
# this doesn't really matter, it will cancel in the end, but we
# need it for the mpl API
dpi = 100
baseout, extout = os.path.splitext(thumbfile)
im = image.imread(infile)
rows, cols = im.shape[:2]
x0 = int(cx * cols - 0.5 * width)
y0 = int(cy * rows - 0.5 * height)
thumb = im[y0: y0 + height,
x0: x0 + width]
thumb[:border, :, :3] = thumb[-border:, :, :3] = 0
thumb[:, :border, :3] = thumb[:, -border:, :3] = 0
extension = extout.lower()
if extension == '.png':
from matplotlib.backends.backend_agg \
import FigureCanvasAgg as FigureCanvas
elif extension == '.pdf':
from matplotlib.backends.backend_pdf \
import FigureCanvasPDF as FigureCanvas
elif extension == '.svg':
from matplotlib.backends.backend_svg \
import FigureCanvasSVG as FigureCanvas
else:
raise ValueError("Can only handle extensions 'png', 'svg' or 'pdf'")
fig = Figure(figsize=(float(width) / dpi, float(height) / dpi),
dpi=dpi)
canvas = FigureCanvas(fig)
ax = fig.add_axes([0, 0, 1, 1], aspect='auto',
frameon=False, xticks=[], yticks=[])
ax.imshow(thumb, aspect='auto', resample=True,
interpolation='bilinear')
fig.savefig(thumbfile, dpi=dpi)
return fig
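# A minimal call sketch for create_thumbnail (the file names below are
# hypothetical): the image is cropped around the relative point (cx, cy),
# given a black border, and written with the backend matching the output
# extension.
def _thumbnail_sketch():
    return create_thumbnail('example_plot.png', 'example_plot_thumb.png',
                            width=300, height=300, cx=0.5, cy=0.6)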
def indent(s, N=4):
"""indent a string"""
return s.replace('\n', '\n' + N * ' ')
class ExampleGenerator(object):
"""Tools for generating an example page from a file"""
def __init__(self, filename, target_dir):
self.filename = filename
self.target_dir = target_dir
self.extract_docstring()
self.exec_file()
@property
def dirname(self):
return os.path.split(self.filename)[0]
@property
def fname(self):
return os.path.split(self.filename)[1]
@property
def modulename(self):
return os.path.splitext(self.fname)[0]
@property
def pyfilename(self):
return self.modulename + '.py'
@property
def rstfilename(self):
return self.modulename + ".rst"
@property
def htmlfilename(self):
return self.modulename + '.html'
@property
def pngfilename(self):
return self.modulename + '.png'
@property
def thumbfilename(self):
# TODO: don't hard-code image path
return "_images/" + self.pngfilename
@property
def sphinxtag(self):
return self.modulename
@property
def pagetitle(self):
return self.docstring.strip().split('\n')[0].strip()
def extract_docstring(self):
""" Extract a module-level docstring
"""
lines = open(self.filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
tokens = tokenize.generate_tokens(lines.pop)
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs,
# extract the first one:
paragraphs = '\n'.join(line.rstrip()
for line in docstring.split('\n')
).split('\n\n')
if len(paragraphs) > 0:
first_par = paragraphs[0]
break
self.docstring = docstring
self.short_desc = first_par
self.end_line = erow + 1 + start_row
def exec_file(self):
print("running {0}".format(self.filename))
with disable_mpld3():
import matplotlib.pyplot as plt
plt.close('all')
my_globals = {'pl': plt,
'plt': plt}
exec(compile(open(self.filename, "rb").read(), self.filename, 'exec'), my_globals)
fig = plt.gcf()
self.html = mpld3.fig_to_html(fig)
thumbfile = os.path.join(self.target_dir,
self.pngfilename)
fig.savefig(thumbfile)
create_thumbnail(thumbfile, thumbfile)
def toctree_entry(self):
return " ./%s\n\n" % os.path.splitext(self.htmlfilename)[0]
def contents_entry(self):
return (".. figure:: ./{0}\n"
" :target: ./{1}\n"
" :align: center\n\n"
" :ref:`{2}`\n\n".format(self.pngfilename,
self.htmlfilename,
self.sphinxtag))
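# A sketch of driving ExampleGenerator by hand for a single file (the
# paths are hypothetical): the generator extracts the module docstring,
# executes the script, and exposes the pieces that main() stitches into
# the RST page.
def _example_generator_sketch():
    ex = ExampleGenerator('examples/scatter_demo.py', 'examples')
    return RST_TEMPLATE.format(sphinx_tag=ex.sphinxtag,
                               docstring=ex.docstring,
                               end_line=ex.end_line,
                               fname=ex.pyfilename,
                               img_html=indent(ex.html, 4))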
def main(app):
static_dir = os.path.join(app.builder.srcdir, '_static')
target_dir = os.path.join(app.builder.srcdir, 'examples')
source_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'..', 'examples'))
if not os.path.exists(static_dir):
os.makedirs(static_dir)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
banner_data = []
toctree = ("\n\n"
".. toctree::\n"
" :hidden:\n\n")
contents = "\n\n"
# Write individual example files
for filename in glob.glob(os.path.join(source_dir, "*.py")):
ex = ExampleGenerator(filename, target_dir)
banner_data.append({"title": ex.pagetitle,
"url": os.path.join('examples', ex.htmlfilename),
"thumb": os.path.join(ex.thumbfilename)})
shutil.copyfile(filename, os.path.join(target_dir, ex.pyfilename))
output = RST_TEMPLATE.format(sphinx_tag=ex.sphinxtag,
docstring=ex.docstring,
end_line=ex.end_line,
fname=ex.pyfilename,
img_html=indent(ex.html, 4))
with open(os.path.join(target_dir, ex.rstfilename), 'w') as f:
f.write(output)
toctree += ex.toctree_entry()
contents += ex.contents_entry()
if len(banner_data) < 10:
banner_data = (4 * banner_data)[:10]
# write index file
index_file = os.path.join(target_dir, 'index.rst')
with open(index_file, 'w') as index:
index.write(INDEX_TEMPLATE.format(sphinx_tag="example-gallery",
toctree=toctree,
contents=contents))
# write javascript include for front page
js_file = os.path.join(static_dir, 'banner_data.js')
with open(js_file, 'w') as js:
js.write(BANNER_JS_TEMPLATE.format(
banner_data=json.dumps(banner_data)))
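# How this extension would typically be wired into a Sphinx build (a
# hedged sketch; the directory layout is assumed): conf.py adds the
# sphinxext directory to sys.path and lists the module in ``extensions``,
# after which Sphinx calls setup(app) below and main() runs on the
# 'builder-inited' event.
#
#     sys.path.insert(0, os.path.abspath('sphinxext'))
#     extensions = ['plot_generator']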
def setup(app):
app.connect('builder-inited', main)
| bsd-3-clause |
teonlamont/mne-python | mne/preprocessing/ica.py | 2 | 105797 | # -*- coding: utf-8 -*-
#
# Authors: Denis A. Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
# Juergen Dammers <[email protected]>
#
# License: BSD (3-clause)
from inspect import isfunction
from collections import namedtuple
from copy import deepcopy
from numbers import Integral
from time import time
import os
import json
import numpy as np
from scipy import linalg
from .ecg import (qrs_detector, _get_ecg_channel_index, _make_ecg,
create_ecg_epochs)
from .eog import _find_eog_events, _get_eog_channel_index
from .infomax_ import infomax
from ..cov import compute_whitener
from .. import Covariance, Evoked
from ..io.pick import (pick_types, pick_channels, pick_info,
_pick_data_channels, _DATA_CH_TYPES_SPLIT)
from ..io.write import (write_double_matrix, write_string,
write_name_list, write_int, start_block,
end_block)
from ..io.tree import dir_tree_find
from ..io.open import fiff_open
from ..io.tag import read_tag
from ..io.meas_info import write_meas_info, read_meas_info
from ..io.constants import Bunch, FIFF
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
from ..viz import (plot_ica_components, plot_ica_scores,
plot_ica_sources, plot_ica_overlay)
from ..viz.ica import plot_ica_properties
from ..viz.topomap import _plot_corrmap
from ..channels.channels import _contains_ch_type, ContainsMixin
from ..io.write import start_file, end_file, write_id
from ..utils import (check_version, logger, check_fname, verbose,
_reject_data_segments, check_random_state,
compute_corr, _get_inst_data, _ensure_int,
copy_function_doc_to_method_doc, _pl, warn,
_check_preload, _check_compensation_grade)
from ..fixes import _get_args
from ..filter import filter_data
from .bads import find_outliers
from .ctps_ import ctps
from ..externals.six import string_types, text_type
from ..io.pick import channel_type
__all__ = ('ICA', 'ica_find_ecg_events', 'ica_find_eog_events',
'get_score_funcs', 'read_ica', 'run_ica')
def _make_xy_sfunc(func, ndim_output=False):
"""Aux function."""
if ndim_output:
def sfunc(x, y):
return np.array([func(a, y.ravel()) for a in x])[:, 0]
else:
def sfunc(x, y):
return np.array([func(a, y.ravel()) for a in x])
sfunc.__name__ = '.'.join(['score_func', func.__module__, func.__name__])
sfunc.__doc__ = func.__doc__
return sfunc
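# A small sketch of what _make_xy_sfunc produces (inputs are illustrative):
# a scipy statistic taking (x, y) is wrapped so it can be evaluated row by
# row against a single target trace, as score_sources() does internally.
def _make_xy_sfunc_sketch():
    from scipy.stats import pearsonr
    sfunc = _make_xy_sfunc(pearsonr, ndim_output=True)
    rng = np.random.RandomState(0)
    sources = rng.randn(3, 1000)                  # three "components"
    target = sources[0] + 0.1 * rng.randn(1000)   # noisy copy of the first
    return sfunc(sources, target)                 # one correlation per row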
# makes score funcs attr accessible for users
def get_score_funcs():
"""Get the score functions."""
from scipy import stats
from scipy.spatial import distance
score_funcs = Bunch()
xy_arg_dist_funcs = [(n, f) for n, f in vars(distance).items()
if isfunction(f) and not n.startswith('_')]
xy_arg_stats_funcs = [(n, f) for n, f in vars(stats).items()
if isfunction(f) and not n.startswith('_')]
score_funcs.update(dict((n, _make_xy_sfunc(f))
for n, f in xy_arg_dist_funcs
if _get_args(f) == ['u', 'v']))
score_funcs.update(dict((n, _make_xy_sfunc(f, ndim_output=True))
for n, f in xy_arg_stats_funcs
if _get_args(f) == ['x', 'y']))
return score_funcs
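# A quick way to see which string labels ``score_func`` accepts
# (illustrative): the keys of the returned Bunch are the valid names,
# including 'pearsonr' and the scipy.spatial distance metrics such as
# 'euclidean' or 'correlation'.
def _list_score_funcs_sketch():
    return sorted(get_score_funcs().keys())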
def _check_for_unsupported_ica_channels(picks, info):
"""Check for channels in picks that are not considered valid channels.
Accepted channels are the data channels
('seeg','ecog','eeg', 'hbo', 'hbr', 'mag', and 'grad') and 'eog'.
This prevents the program from crashing without
feedback when a bad channel is provided to ICA whitening.
"""
if picks is None:
return
elif len(picks) == 0:
raise ValueError('No channels provided to ICA')
types = _DATA_CH_TYPES_SPLIT + ['eog']
chs = list(set([channel_type(info, j) for j in picks]))
check = all([ch in types for ch in chs])
if not check:
raise ValueError('Invalid channel type(s) passed for ICA.\n'
'Only the following channels are supported {0}\n'
'Following types were passed {1}\n'
.format(types, chs))
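# A compact, hedged sketch of the artifact-correction workflow the ICA
# class below supports (the file name and EOG channel name are
# hypothetical):
def _ica_workflow_sketch():
    import mne
    raw = mne.io.read_raw_fif('sample_raw.fif', preload=True)
    raw.filter(1., None)  # high-pass; ICA is sensitive to slow drifts
    ica = ICA(n_components=0.95, method='fastica', random_state=42)
    ica.fit(raw, decim=3, reject=dict(mag=4e-12, grad=4000e-13))
    ecg_inds, ecg_scores = ica.find_bads_ecg(raw, method='ctps')
    eog_inds, eog_scores = ica.find_bads_eog(raw, ch_name='EOG 061')
    ica.exclude = ecg_inds + eog_inds
    return ica.apply(raw.copy())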
class ICA(ContainsMixin):
u"""M/EEG signal decomposition using Independent Component Analysis (ICA).
This object can be used to estimate ICA components and then remove some
from Raw or Epochs for data exploration or artifact correction.
Caveat! If supplying a noise covariance, keep track of the projections
available in the cov or in the raw object. For example, if you are
interested in EOG or ECG artifacts, EOG and ECG projections should be
temporally removed before fitting ICA, for example::
>> projs, raw.info['projs'] = raw.info['projs'], []
>> ica.fit(raw)
>> raw.info['projs'] = projs
.. note:: Methods currently implemented are FastICA (default), Infomax,
Extended Infomax, and Picard. Infomax can be quite sensitive to
differences in floating point arithmetic. Extended Infomax seems
to be more stable in this respect enhancing reproducibility and
stability of results.
.. warning:: ICA is sensitive to low-frequency drifts and therefore
requires the data to be high-pass filtered prior to fitting.
Typically, a cutoff frequency of 1 Hz is recommended.
Parameters
----------
n_components : int | float | None
The number of components used for ICA decomposition. If int, it must be
smaller than `max_pca_components`. If None, all PCA components will
be used. If float between 0 and 1, components will be selected by the
cumulative percentage of explained variance.
max_pca_components : int | None
The number of components used for PCA decomposition. If None, no
dimensionality reduction will be applied and `max_pca_components` will
equal the number of channels supplied for decomposing data.
n_pca_components : int | float
The number of PCA components used after ICA recomposition. The ensuing
attribute `n_components_` allows to balance noise reduction against
potential loss of information due to dimensionality reduction. If
greater than `n_components_`, the next `n_pca_components` minus
`n_components_` PCA components will be added before restoring the
sensor space data. If float, the number of components selected matches
the number of components with a cumulative explained variance below
`n_pca_components`.
noise_cov : None | instance of mne.cov.Covariance
Noise covariance used for pre-whitening. If None, channels are scaled
to unit variance prior to whitening.
random_state : None | int | instance of np.random.RandomState
Random state to initialize ICA estimation for reproducible results.
method : {'fastica', 'infomax', 'extended-infomax', 'picard'}
The ICA method to use. Defaults to 'fastica'. For reference, see [1]_,
[2]_, [3]_ and [4]_.
fit_params : dict | None
Additional parameters passed to the ICA estimator as specified by
`method`.
max_iter : int
Maximum number of iterations during fit.
verbose : bool | str | int | None
If not None, override default verbosity level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>`).
Attributes
----------
current_fit : str
Flag informing about which data type (raw or epochs) was used for the
fit.
ch_names : list-like
Channel names resulting from initial picking.
n_components_ : int
If fit, the actual number of components used for ICA decomposition.
pre_whitener_ : ndarray, shape (n_channels, 1)
If fit, array used to pre-whiten the data prior to PCA.
pca_components_ : ndarray, shape (`n_components_`, n_channels)
If fit, the PCA components.
pca_mean_ : ndarray, shape (n_channels,)
If fit, the mean vector used to center the data before doing the PCA.
pca_explained_variance_ : ndarray, shape (`n_components_`,)
If fit, the variance explained by each PCA component
mixing_matrix_ : ndarray, shape (`n_components_`, `n_components_`)
If fit, the mixing matrix to restore observed data.
unmixing_matrix_ : ndarray, shape (`n_components_`, `n_components_`)
If fit, the matrix to unmix observed data.
exclude : list
List of sources indices to exclude, i.e. artifact components identified
throughout the ICA solution. To scrap all marked components, you can
set this attribute to an empty list.
info : None | instance of Info
The measurement info copied from the object fitted.
n_samples_ : int
The number of samples used on fit.
labels_ : dict
A dictionary of independent component indices, grouped by types of
independent components. This attribute is set by some of the artifact
detection functions.
Notes
-----
Reducing the tolerance speeds up estimation at the cost of consistency of
the obtained results. It is difficult to directly compare tolerance levels
between Infomax and Picard, but for Picard and FastICA a good rule of thumb
is ``tol_fastica = tol_picard ** 2``.
References
----------
.. [1] Hyvärinen, A., 1999. Fast and robust fixed-point algorithms for
independent component analysis. IEEE transactions on Neural
Networks, 10(3), pp.626-634.
.. [2] Bell, A.J., Sejnowski, T.J., 1995. An information-maximization
approach to blind separation and blind deconvolution. Neural
computation, 7(6), pp.1129-1159.
.. [3] Lee, T.W., Girolami, M., Sejnowski, T.J., 1999. Independent
component analysis using an extended infomax algorithm for mixed
subgaussian and supergaussian sources. Neural computation, 11(2),
pp.417-441.
.. [4] Ablin, P., Cardoso, J.F., Gramfort, A., 2017. Faster Independent
Component Analysis by preconditioning with Hessian approximations.
arXiv:1706.08171
"""
@verbose
def __init__(self, n_components=None, max_pca_components=None,
n_pca_components=None, noise_cov=None, random_state=None,
method='fastica', fit_params=None, max_iter=200,
verbose=None): # noqa: D102
methods = ('fastica', 'infomax', 'extended-infomax', 'picard')
if method not in methods:
raise ValueError('`method` must be "%s". You passed: "%s"' %
('" or "'.join(methods), method))
if not check_version('sklearn', '0.15'):
raise RuntimeError('the scikit-learn package (version >= 0.15) '
'is required for ICA')
self.noise_cov = noise_cov
if (n_components is not None and
max_pca_components is not None and
n_components > max_pca_components):
raise ValueError('n_components must be smaller than '
'max_pca_components')
if isinstance(n_components, float) \
and not 0 < n_components <= 1:
raise ValueError('Selecting ICA components by explained variance '
'needs values between 0.0 and 1.0 ')
self.current_fit = 'unfitted'
self.verbose = verbose
self.n_components = n_components
self.max_pca_components = max_pca_components
self.n_pca_components = n_pca_components
self.ch_names = None
self.random_state = random_state
if fit_params is None:
fit_params = {}
fit_params = deepcopy(fit_params) # avoid side effects
if "extended" in fit_params:
raise ValueError("'extended' parameter provided. You should "
"rather use method='extended-infomax'.")
if method == 'fastica':
update = {'algorithm': 'parallel', 'fun': 'logcosh',
'fun_args': None}
fit_params.update(dict((k, v) for k, v in update.items() if k
not in fit_params))
elif method == 'infomax':
fit_params.update({'extended': False})
elif method == 'extended-infomax':
fit_params.update({'extended': True})
elif method == 'picard':
update = {'ortho': True, 'fun': 'tanh', 'tol': 1e-5}
fit_params.update(dict((k, v) for k, v in update.items() if k
not in fit_params))
if 'max_iter' not in fit_params:
fit_params['max_iter'] = max_iter
self.max_iter = max_iter
self.fit_params = fit_params
self.exclude = []
self.info = None
self.method = method
self.labels_ = dict()
def __repr__(self):
"""ICA fit information."""
if self.current_fit == 'unfitted':
s = 'no'
elif self.current_fit == 'raw':
s = 'raw data'
else:
s = 'epochs'
s += ' decomposition, '
s += 'fit (%s): %s samples, ' % (self.method,
str(getattr(self, 'n_samples_', '')))
s += ('%s components' % str(self.n_components_) if
hasattr(self, 'n_components_') else
'no dimension reduction')
if self.info is not None:
ch_fit = ['"%s"' % c for c in _DATA_CH_TYPES_SPLIT if c in self]
s += ', channels used: {0}'.format('; '.join(ch_fit))
if self.exclude:
s += ', %i sources marked for exclusion' % len(self.exclude)
return '<ICA | %s>' % s
@verbose
def fit(self, inst, picks=None, start=None, stop=None, decim=None,
reject=None, flat=None, tstep=2.0, reject_by_annotation=True,
verbose=None):
"""Run the ICA decomposition on raw data.
Caveat! If supplying a noise covariance keep track of the projections
available in the cov, the raw or the epochs object. For example,
if you are interested in EOG or ECG artifacts, EOG and ECG projections
should be temporally removed before fitting the ICA.
Parameters
----------
inst : instance of Raw, Epochs or Evoked
Raw measurements to be decomposed.
picks : array-like of int
Channels to be included. This selection remains throughout the
initialized ICA solution. If None only good data channels are used.
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, data will be used from the first sample.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, data will be used to the last sample.
decim : int | None
Increment for selecting each nth time slice. If None, all samples
within ``start`` and ``stop`` are used.
reject : dict | None
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad', 'mag', 'eeg', 'seeg', 'ecog', 'eog', 'ecg',
'hbo', 'hbr'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
It only applies if `inst` is of type Raw.
flat : dict | None
Rejection parameters based on flatness of signal.
Valid keys are 'grad', 'mag', 'eeg', 'seeg', 'ecog', 'eog', 'ecg',
'hbo', 'hbr'.
Values are floats that set the minimum acceptable peak-to-peak
amplitude. If flat is None then no rejection is done.
It only applies if `inst` is of type Raw.
tstep : float
Length of data chunks for artifact rejection in seconds.
It only applies if `inst` is of type Raw.
reject_by_annotation : bool
Whether to omit bad segments from the data before fitting. If True,
annotated segments with a description that starts with 'bad' are
omitted. Has no effect if ``inst`` is an Epochs or Evoked object.
Defaults to True.
.. versionadded:: 0.14.0
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more). Defaults to self.verbose.
Returns
-------
self : instance of ICA
Returns the modified instance.
"""
if isinstance(inst, (BaseRaw, BaseEpochs)):
_check_for_unsupported_ica_channels(picks, inst.info)
t_start = time()
if isinstance(inst, BaseRaw):
self._fit_raw(inst, picks, start, stop, decim, reject, flat,
tstep, reject_by_annotation, verbose)
elif isinstance(inst, BaseEpochs):
self._fit_epochs(inst, picks, decim, verbose)
else:
raise ValueError('Data input must be of Raw or Epochs type')
# sort ICA components by explained variance
var = _ica_explained_variance(self, inst)
var_ord = var.argsort()[::-1]
_sort_components(self, var_ord, copy=False)
t_stop = time()
logger.info("Fitting ICA took {:.1f}s.".format(t_stop - t_start))
return self
def _reset(self):
"""Aux method."""
del self.pre_whitener_
del self.unmixing_matrix_
del self.mixing_matrix_
del self.n_components_
del self.n_samples_
del self.pca_components_
del self.pca_explained_variance_
del self.pca_mean_
if hasattr(self, 'drop_inds_'):
del self.drop_inds_
def _fit_raw(self, raw, picks, start, stop, decim, reject, flat, tstep,
reject_by_annotation, verbose):
"""Aux method."""
if self.current_fit != 'unfitted':
self._reset()
if picks is None: # just use good data channels
picks = _pick_data_channels(raw.info, exclude='bads',
with_ref_meg=False)
logger.info('Fitting ICA to data using %i channels '
'(please be patient, this may take a while)' % len(picks))
if self.max_pca_components is None:
self.max_pca_components = len(picks)
logger.info('Inferring max_pca_components from picks')
info = raw.info.copy()
if info['comps']:
info['comps'] = []
self.info = pick_info(info, picks)
self.ch_names = self.info['ch_names']
start, stop = _check_start_stop(raw, start, stop)
reject_by_annotation = 'omit' if reject_by_annotation else None
# this will be a copy
data = raw.get_data(picks, start, stop, reject_by_annotation)
# this will be a view
if decim is not None:
data = data[:, ::decim]
# this will make a copy
if (reject is not None) or (flat is not None):
data, self.drop_inds_ = _reject_data_segments(data, reject, flat,
decim, self.info,
tstep)
self.n_samples_ = data.shape[1]
# this may operate inplace or make a copy
data, self.pre_whitener_ = self._pre_whiten(data, info, picks)
self._fit(data, self.max_pca_components, 'raw')
return self
def _fit_epochs(self, epochs, picks, decim, verbose):
"""Aux method."""
if self.current_fit != 'unfitted':
self._reset()
if picks is None:
picks = _pick_data_channels(epochs.info, exclude='bads',
with_ref_meg=False)
logger.info('Fitting ICA to data using %i channels '
'(please be patient, this may take a while)' % len(picks))
# filter out all the channels the raw wouldn't have initialized
info = epochs.info.copy()
if info['comps']:
info['comps'] = []
self.info = pick_info(info, picks)
self.ch_names = self.info['ch_names']
if self.max_pca_components is None:
self.max_pca_components = len(picks)
logger.info('Inferring max_pca_components from picks')
# this should be a copy (picks a list of int)
data = epochs.get_data()[:, picks]
# this will be a view
if decim is not None:
data = data[:, :, ::decim]
self.n_samples_ = np.prod(data[:, 0, :].shape)
# This will make at least one copy (one from hstack, maybe one
# more from _pre_whiten)
data, self.pre_whitener_ = \
self._pre_whiten(np.hstack(data), info, picks)
self._fit(data, self.max_pca_components, 'epochs')
return self
def _pre_whiten(self, data, info, picks):
"""Aux function."""
has_pre_whitener = hasattr(self, 'pre_whitener_')
if not has_pre_whitener and self.noise_cov is None:
# use standardization as whitener
# Scale (z-score) the data by channel type
info = pick_info(info, picks)
pre_whitener = np.empty([len(data), 1])
for ch_type in _DATA_CH_TYPES_SPLIT + ['eog']:
if _contains_ch_type(info, ch_type):
if ch_type == 'seeg':
this_picks = pick_types(info, meg=False, seeg=True)
elif ch_type == 'ecog':
this_picks = pick_types(info, meg=False, ecog=True)
elif ch_type == 'eeg':
this_picks = pick_types(info, meg=False, eeg=True)
elif ch_type in ('mag', 'grad'):
this_picks = pick_types(info, meg=ch_type)
elif ch_type == 'eog':
this_picks = pick_types(info, meg=False, eog=True)
elif ch_type in ('hbo', 'hbr'):
this_picks = pick_types(info, meg=False, fnirs=ch_type)
else:
                        raise RuntimeError('Should not be reached. '
                                           'Unsupported channel {0}'
                                           .format(ch_type))
pre_whitener[this_picks] = np.std(data[this_picks])
data /= pre_whitener
elif not has_pre_whitener and self.noise_cov is not None:
pre_whitener, _ = compute_whitener(self.noise_cov, info, picks)
assert data.shape[0] == pre_whitener.shape[1]
data = np.dot(pre_whitener, data)
elif has_pre_whitener and self.noise_cov is None:
data /= self.pre_whitener_
pre_whitener = self.pre_whitener_
else:
data = np.dot(self.pre_whitener_, data)
pre_whitener = self.pre_whitener_
return data, pre_whitener
def _fit(self, data, max_pca_components, fit_type):
"""Aux function."""
random_state = check_random_state(self.random_state)
from sklearn.decomposition import PCA
if not check_version('sklearn', '0.18'):
pca = PCA(n_components=max_pca_components, whiten=True, copy=True)
else:
pca = PCA(n_components=max_pca_components, whiten=True, copy=True,
svd_solver='full')
data = pca.fit_transform(data.T)
if isinstance(self.n_components, float):
n_components_ = np.sum(pca.explained_variance_ratio_.cumsum() <=
self.n_components)
if n_components_ < 1:
raise RuntimeError('One PCA component captures most of the '
'explained variance, your threshold resu'
'lts in 0 components. You should select '
'a higher value.')
logger.info('Selection by explained variance: %i components' %
n_components_)
sel = slice(n_components_)
else:
if self.n_components is not None: # normal n case
sel = slice(self.n_components)
logger.info('Selection by number: %i components' %
self.n_components)
else: # None case
logger.info('Using all PCA components: %i'
% len(pca.components_))
sel = slice(len(pca.components_))
# the things to store for PCA
self.pca_mean_ = pca.mean_
self.pca_components_ = pca.components_
self.pca_explained_variance_ = exp_var = pca.explained_variance_
if not check_version('sklearn', '0.16'):
# sklearn < 0.16 did not apply whitening to the components, so we
# need to do this manually
self.pca_components_ *= np.sqrt(exp_var[:, None])
del pca
# update number of components
self.n_components_ = sel.stop
self._update_ica_names()
if self.n_pca_components is not None:
if self.n_pca_components > len(self.pca_components_):
self.n_pca_components = len(self.pca_components_)
# take care of ICA
if self.method == 'fastica':
from sklearn.decomposition import FastICA
ica = FastICA(whiten=False, random_state=random_state,
**self.fit_params)
ica.fit(data[:, sel])
self.unmixing_matrix_ = ica.components_
elif self.method in ('infomax', 'extended-infomax'):
self.unmixing_matrix_ = infomax(data[:, sel],
random_state=random_state,
**self.fit_params)
elif self.method == 'picard':
from picard import picard
_, W, _ = picard(data[:, sel].T, whiten=False,
random_state=random_state, **self.fit_params)
del _
self.unmixing_matrix_ = W
self.unmixing_matrix_ /= np.sqrt(exp_var[sel])[None, :] # whitening
self.mixing_matrix_ = linalg.pinv(self.unmixing_matrix_)
self.current_fit = fit_type
def _update_ica_names(self):
"""Update ICA names when n_components_ is set."""
self._ica_names = ['ICA%03d' % ii for ii in range(self.n_components_)]
def _transform(self, data):
"""Compute sources from data (operates inplace)."""
if self.pca_mean_ is not None:
data -= self.pca_mean_[:, None]
# Apply first PCA
pca_data = np.dot(self.pca_components_[:self.n_components_], data)
# Apply unmixing to low dimension PCA
sources = np.dot(self.unmixing_matrix_, pca_data)
return sources
def _transform_raw(self, raw, start, stop, reject_by_annotation=False):
"""Transform raw data."""
if not hasattr(self, 'mixing_matrix_'):
raise RuntimeError('No fit available. Please fit ICA.')
start, stop = _check_start_stop(raw, start, stop)
picks = pick_types(raw.info, include=self.ch_names, exclude='bads',
meg=False, ref_meg=False)
if len(picks) != len(self.ch_names):
raise RuntimeError('Raw doesn\'t match fitted data: %i channels '
'fitted but %i channels supplied. \nPlease '
'provide Raw compatible with '
'ica.ch_names' % (len(self.ch_names),
len(picks)))
if reject_by_annotation:
data = raw.get_data(picks, start, stop, 'omit')
else:
data = raw[picks, start:stop][0]
# remove comp matrices
info = raw.info.copy()
if info['comps']:
info['comps'] = []
data, _ = self._pre_whiten(data, info, picks)
return self._transform(data)
def _transform_epochs(self, epochs, concatenate):
"""Aux method."""
if not hasattr(self, 'mixing_matrix_'):
raise RuntimeError('No fit available. Please fit ICA.')
picks = pick_types(epochs.info, include=self.ch_names, exclude='bads',
meg=False, ref_meg=False)
# special case where epochs come picked but fit was 'unpicked'.
if len(picks) != len(self.ch_names):
raise RuntimeError('Epochs don\'t match fitted data: %i channels '
'fitted but %i channels supplied. \nPlease '
'provide Epochs compatible with '
'ica.ch_names' % (len(self.ch_names),
len(picks)))
# remove comp matrices
info = epochs.info.copy()
if info['comps']:
info['comps'] = []
data = np.hstack(epochs.get_data()[:, picks])
data, _ = self._pre_whiten(data, info, picks)
sources = self._transform(data)
if not concatenate:
# Put the data back in 3D
sources = np.array(np.split(sources, len(epochs.events), 1))
return sources
def _transform_evoked(self, evoked):
"""Aux method."""
if not hasattr(self, 'mixing_matrix_'):
raise RuntimeError('No fit available. Please fit ICA.')
picks = pick_types(evoked.info, include=self.ch_names, exclude='bads',
meg=False, ref_meg=False)
if len(picks) != len(self.ch_names):
raise RuntimeError('Evoked doesn\'t match fitted data: %i channels'
' fitted but %i channels supplied. \nPlease '
'provide Evoked compatible with '
'ica.ch_names' % (len(self.ch_names),
len(picks)))
# remove comp matrices
info = evoked.info.copy()
if info['comps']:
info['comps'] = []
data, _ = self._pre_whiten(evoked.data[picks], info, picks)
sources = self._transform(data)
return sources
def get_components(self):
"""Get ICA topomap for components as numpy arrays.
Returns
-------
components : array, shape (n_channels, n_components)
The ICA components (maps).
"""
return np.dot(self.mixing_matrix_[:, :self.n_components_].T,
self.pca_components_[:self.n_components_]).T
def get_sources(self, inst, add_channels=None, start=None, stop=None):
"""Estimate sources given the unmixing matrix.
This method will return the sources in the container format passed.
Typical usecases:
1. pass Raw object to use `raw.plot` for ICA sources
2. pass Epochs object to compute trial-based statistics in ICA space
3. pass Evoked object to investigate time-locking in ICA space
Parameters
----------
inst : instance of Raw, Epochs or Evoked
Object to compute sources from and to represent sources in.
add_channels : None | list of str
Additional channels to be added. Useful to e.g. compare sources
with some reference. Defaults to None
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, the entire data will be used.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, the entire data will be used.
Returns
-------
sources : instance of Raw, Epochs or Evoked
The ICA sources time series.
"""
if isinstance(inst, BaseRaw):
_check_compensation_grade(self, inst, 'ICA', 'Raw',
ch_names=self.ch_names)
sources = self._sources_as_raw(inst, add_channels, start, stop)
elif isinstance(inst, BaseEpochs):
_check_compensation_grade(self, inst, 'ICA', 'Epochs',
ch_names=self.ch_names)
sources = self._sources_as_epochs(inst, add_channels, False)
elif isinstance(inst, Evoked):
_check_compensation_grade(self, inst, 'ICA', 'Evoked',
ch_names=self.ch_names)
sources = self._sources_as_evoked(inst, add_channels)
else:
raise ValueError('Data input must be of Raw, Epochs or Evoked '
'type')
return sources
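    # Usage sketch (the raw object is hypothetical): the returned instance
    # holds the component time courses as misc channels named 'ICA000',
    # 'ICA001', ... and can be browsed like ordinary data.
    #
    #     sources = ica.get_sources(raw)
    #     sources.plot()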
def _sources_as_raw(self, raw, add_channels, start, stop):
"""Aux method."""
# merge copied instance and picked data with sources
sources = self._transform_raw(raw, start=start, stop=stop)
if raw.preload: # get data and temporarily delete
data = raw._data
del raw._data
out = raw.copy() # copy and reappend
if raw.preload:
raw._data = data
# populate copied raw.
start, stop = _check_start_stop(raw, start, stop)
if add_channels is not None:
raw_picked = raw.copy().pick_channels(add_channels)
data_, times_ = raw_picked[:, start:stop]
data_ = np.r_[sources, data_]
else:
data_ = sources
_, times_ = raw[0, start:stop]
out._data = data_
out._times = times_
out._filenames = [None]
out.preload = True
# update first and last samples
out._first_samps = np.array([raw.first_samp +
(start if start else 0)])
out._last_samps = np.array([out.first_samp + stop
if stop else raw.last_samp])
out._projector = None
self._export_info(out.info, raw, add_channels)
out._update_times()
return out
def _sources_as_epochs(self, epochs, add_channels, concatenate):
"""Aux method."""
out = epochs.copy()
sources = self._transform_epochs(epochs, concatenate)
if add_channels is not None:
picks = [epochs.ch_names.index(k) for k in add_channels]
else:
picks = []
out._data = np.concatenate([sources, epochs.get_data()[:, picks]],
axis=1) if len(picks) > 0 else sources
self._export_info(out.info, epochs, add_channels)
out.preload = True
out._raw = None
out._projector = None
return out
def _sources_as_evoked(self, evoked, add_channels):
"""Aux method."""
if add_channels is not None:
picks = [evoked.ch_names.index(k) for k in add_channels]
else:
picks = []
sources = self._transform_evoked(evoked)
        if len(picks) > 0:
data = np.r_[sources, evoked.data[picks]]
else:
data = sources
out = evoked.copy()
out.data = data
self._export_info(out.info, evoked, add_channels)
return out
def _export_info(self, info, container, add_channels):
"""Aux method."""
# set channel names and info
ch_names = []
ch_info = info['chs'] = []
for ii, name in enumerate(self._ica_names):
ch_names.append(name)
ch_info.append(dict(
ch_name=name, cal=1, logno=ii + 1,
coil_type=FIFF.FIFFV_COIL_NONE, kind=FIFF.FIFFV_MISC_CH,
                coord_frame=FIFF.FIFFV_COORD_UNKNOWN, unit=FIFF.FIFF_UNIT_NONE,
loc=np.array([0., 0., 0., 1.] * 3, dtype='f4'),
range=1.0, scanno=ii + 1, unit_mul=0))
if add_channels is not None:
# re-append additionally picked ch_names
ch_names += add_channels
# re-append additionally picked ch_info
ch_info += [k for k in container.info['chs'] if k['ch_name'] in
add_channels]
info['bads'] = [ch_names[k] for k in self.exclude]
info['projs'] = [] # make sure projections are removed.
info._update_redundant()
info._check_consistency()
@verbose
def score_sources(self, inst, target=None, score_func='pearsonr',
start=None, stop=None, l_freq=None, h_freq=None,
reject_by_annotation=True, verbose=None):
"""Assign score to components based on statistic or metric.
Parameters
----------
inst : instance of Raw, Epochs or Evoked
The object to reconstruct the sources from.
target : array-like | ch_name | None
Signal to which the sources shall be compared. It has to be of
the same shape as the sources. If some string is supplied, a
routine will try to find a matching channel. If None, a score
function expecting only one input-array argument must be used,
for instance, scipy.stats.skew (default).
score_func : callable | str label
            Callable taking as arguments either two input arrays
            (e.g. Pearson correlation) or one input array (e.g. skewness)
            and returning a float. For convenience, the most common
            score_funcs are available via string labels. Currently, all
            distance metrics from scipy.spatial and all functions from
            scipy.stats taking compatible input arguments are supported.
            These functions have been modified to support iteration over
            the rows of a 2D array.
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, data will be used from the first sample.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, data will be used to the last sample.
        l_freq : float
            Low cutoff frequency of the band-pass filter applied to the
            sources and the target before scoring.
        h_freq : float
            High cutoff frequency of the band-pass filter applied to the
            sources and the target before scoring.
reject_by_annotation : bool
If True, data annotated as bad will be omitted. Defaults to True.
.. versionadded:: 0.14.0
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more). Defaults to self.verbose.
Returns
-------
scores : ndarray
scores for each source as returned from score_func
"""
if isinstance(inst, BaseRaw):
_check_compensation_grade(self, inst, 'ICA', 'Raw',
ch_names=self.ch_names)
sources = self._transform_raw(inst, start, stop,
reject_by_annotation)
elif isinstance(inst, BaseEpochs):
_check_compensation_grade(self, inst, 'ICA', 'Epochs',
ch_names=self.ch_names)
sources = self._transform_epochs(inst, concatenate=True)
elif isinstance(inst, Evoked):
_check_compensation_grade(self, inst, 'ICA', 'Evoked',
ch_names=self.ch_names)
sources = self._transform_evoked(inst)
else:
raise ValueError('Data input must be of Raw, Epochs or Evoked '
'type')
if target is not None: # we can have univariate metrics without target
target = self._check_target(target, inst, start, stop,
reject_by_annotation)
if sources.shape[-1] != target.shape[-1]:
                raise ValueError('Sources and target do not have the same '
                                 'number of time slices.')
# auto target selection
if verbose is None:
verbose = self.verbose
if isinstance(inst, BaseRaw):
sources, target = _band_pass_filter(self, sources, target,
l_freq, h_freq, verbose)
scores = _find_sources(sources, target, score_func)
return scores
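    # Usage sketch (the channel name is hypothetical): score every
    # component against one recorded channel and rank by absolute score.
    #
    #     scores = ica.score_sources(raw, target='EEG 001',
    #                                score_func='pearsonr')
    #     ranked = np.argsort(np.abs(scores))[::-1]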
def _check_target(self, target, inst, start, stop,
reject_by_annotation=False):
"""Aux Method."""
if isinstance(inst, BaseRaw):
reject_by_annotation = 'omit' if reject_by_annotation else None
start, stop = _check_start_stop(inst, start, stop)
if hasattr(target, 'ndim'):
if target.ndim < 2:
target = target.reshape(1, target.shape[-1])
if isinstance(target, string_types):
pick = _get_target_ch(inst, target)
target = inst.get_data(pick, start, stop, reject_by_annotation)
elif isinstance(inst, BaseEpochs):
if isinstance(target, string_types):
pick = _get_target_ch(inst, target)
target = inst.get_data()[:, pick]
if hasattr(target, 'ndim'):
if target.ndim == 3 and min(target.shape) == 1:
target = target.ravel()
elif isinstance(inst, Evoked):
if isinstance(target, string_types):
pick = _get_target_ch(inst, target)
target = inst.data[pick]
return target
@verbose
def find_bads_ecg(self, inst, ch_name=None, threshold=None, start=None,
stop=None, l_freq=8, h_freq=16, method='ctps',
reject_by_annotation=True, verbose=None):
"""Detect ECG related components using correlation.
        .. note:: If no ECG channel is available, the routine attempts to
                  create an artificial ECG based on cross-channel averaging.
Parameters
----------
inst : instance of Raw, Epochs or Evoked
Object to compute sources from.
ch_name : str
The name of the channel to use for ECG peak detection.
The argument is mandatory if the dataset contains no ECG
channels.
threshold : float
The value above which a feature is classified as outlier. If
method is 'ctps', defaults to 0.25, else defaults to 3.0.
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, data will be used from the first sample.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, data will be used to the last sample.
        l_freq : float
            Low cutoff frequency of the band-pass filter applied before
            scoring; used when method='correlation'.
        h_freq : float
            High cutoff frequency of the band-pass filter applied before
            scoring; used when method='correlation'.
method : {'ctps', 'correlation'}
The method used for detection. If 'ctps', cross-trial phase
statistics [1] are used to detect ECG related components.
Thresholding is then based on the significance value of a Kuiper
statistic.
If 'correlation', detection is based on Pearson correlation
between the filtered data and the filtered ECG channel.
Thresholding is based on iterative z-scoring. The above
threshold components will be masked and the z-score will
be recomputed until no supra-threshold component remains.
Defaults to 'ctps'.
reject_by_annotation : bool
If True, data annotated as bad will be omitted. Defaults to True.
.. versionadded:: 0.14.0
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more). Defaults to self.verbose.
Returns
-------
ecg_idx : list of int
The indices of ECG related components.
scores : np.ndarray of float, shape (``n_components_``)
The correlation scores.
See Also
--------
find_bads_eog
References
----------
[1] Dammers, J., Schiek, M., Boers, F., Silex, C., Zvyagintsev,
M., Pietrzyk, U., Mathiak, K., 2008. Integration of amplitude
and phase statistics for complete artifact removal in independent
components of neuromagnetic recordings. Biomedical
Engineering, IEEE Transactions on 55 (10), 2353-2362.
"""
if verbose is None:
verbose = self.verbose
idx_ecg = _get_ecg_channel_index(ch_name, inst)
if idx_ecg is None:
if verbose is not None:
verbose = self.verbose
ecg, times = _make_ecg(inst, start, stop,
reject_by_annotation=reject_by_annotation,
verbose=verbose)
else:
ecg = inst.ch_names[idx_ecg]
if method == 'ctps':
if threshold is None:
threshold = 0.25
if isinstance(inst, BaseRaw):
sources = self.get_sources(create_ecg_epochs(
inst, ch_name, keep_ecg=False,
reject_by_annotation=reject_by_annotation)).get_data()
if sources.shape[0] == 0:
warn('No ECG activity detected. Consider changing '
'the input parameters.')
elif isinstance(inst, BaseEpochs):
sources = self.get_sources(inst).get_data()
else:
raise ValueError('With `ctps` only Raw and Epochs input is '
'supported')
_, p_vals, _ = ctps(sources)
scores = p_vals.max(-1)
ecg_idx = np.where(scores >= threshold)[0]
elif method == 'correlation':
if threshold is None:
threshold = 3.0
scores = self.score_sources(
inst, target=ecg, score_func='pearsonr', start=start,
stop=stop, l_freq=l_freq, h_freq=h_freq,
reject_by_annotation=reject_by_annotation, verbose=verbose)
ecg_idx = find_outliers(scores, threshold=threshold)
else:
raise ValueError('Method "%s" not supported.' % method)
# sort indices by scores
ecg_idx = ecg_idx[np.abs(scores[ecg_idx]).argsort()[::-1]]
self.labels_['ecg'] = list(ecg_idx)
if ch_name is None:
ch_name = 'ECG-MAG'
self.labels_['ecg/%s' % ch_name] = list(ecg_idx)
return self.labels_['ecg'], scores
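    # Usage sketch: with method='ctps' no dedicated ECG channel is needed
    # (one is synthesized from the MEG channels if absent); the returned
    # indices are typically added to ``ica.exclude``.
    #
    #     ecg_inds, scores = ica.find_bads_ecg(raw, method='ctps')
    #     ica.exclude += ecg_inds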
@verbose
def find_bads_eog(self, inst, ch_name=None, threshold=3.0, start=None,
stop=None, l_freq=1, h_freq=10,
reject_by_annotation=True, verbose=None):
"""Detect EOG related components using correlation.
Detection is based on Pearson correlation between the
filtered data and the filtered EOG channel.
Thresholding is based on adaptive z-scoring. The above threshold
components will be masked and the z-score will be recomputed
until no supra-threshold component remains.
Parameters
----------
inst : instance of Raw, Epochs or Evoked
Object to compute sources from.
ch_name : str
The name of the channel to use for EOG peak detection.
The argument is mandatory if the dataset contains no EOG
channels.
threshold : int | float
The value above which a feature is classified as outlier.
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, data will be used from the first sample.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, data will be used to the last sample.
        l_freq : float
            Low cutoff frequency of the band-pass filter applied before
            scoring.
        h_freq : float
            High cutoff frequency of the band-pass filter applied before
            scoring.
reject_by_annotation : bool
If True, data annotated as bad will be omitted. Defaults to True.
.. versionadded:: 0.14.0
verbose : bool, str, int, or None
If not None, override default verbose level (see
:func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
for more). Defaults to self.verbose.
Returns
-------
eog_idx : list of int
The indices of EOG related components, sorted by score.
scores : np.ndarray of float, shape (``n_components_``) | list of array
The correlation scores.
See Also
--------
find_bads_ecg
"""
if verbose is None:
verbose = self.verbose
eog_inds = _get_eog_channel_index(ch_name, inst)
if len(eog_inds) > 2:
eog_inds = eog_inds[:1]
logger.info('Using EOG channel %s' % inst.ch_names[eog_inds[0]])
scores, eog_idx = [], []
eog_chs = [inst.ch_names[k] for k in eog_inds]
# some magic we need inevitably ...
# get targets before equalizing
targets = [self._check_target(k, inst, start, stop,
reject_by_annotation) for k in eog_chs]
for ii, (eog_ch, target) in enumerate(zip(eog_chs, targets)):
scores += [self.score_sources(
inst, target=target, score_func='pearsonr', start=start,
stop=stop, l_freq=l_freq, h_freq=h_freq, verbose=verbose,
reject_by_annotation=reject_by_annotation)]
# pick last scores
this_idx = find_outliers(scores[-1], threshold=threshold)
eog_idx += [this_idx]
self.labels_[('eog/%i/' % ii) + eog_ch] = list(this_idx)
# remove duplicates but keep order by score, even across multiple
# EOG channels
scores_ = np.concatenate([scores[ii][inds]
for ii, inds in enumerate(eog_idx)])
eog_idx_ = np.concatenate(eog_idx)[np.abs(scores_).argsort()[::-1]]
eog_idx_unique = list(np.unique(eog_idx_))
eog_idx = []
for i in eog_idx_:
if i in eog_idx_unique:
eog_idx.append(i)
eog_idx_unique.remove(i)
if len(scores) == 1:
scores = scores[0]
self.labels_['eog'] = list(eog_idx)
return self.labels_['eog'], scores
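    # Usage sketch (the EOG channel name is hypothetical):
    #
    #     eog_inds, scores = ica.find_bads_eog(raw, ch_name='EOG 061')
    #     ica.plot_scores(scores, exclude=eog_inds)
    #     ica.exclude += eog_inds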
def apply(self, inst, include=None, exclude=None, n_pca_components=None,
start=None, stop=None):
"""Remove selected components from the signal.
Given the unmixing matrix, transform data,
zero out components, and inverse transform the data.
This procedure will reconstruct M/EEG signals from which
the dynamics described by the excluded components is subtracted.
The data is processed in place.
Parameters
----------
inst : instance of Raw, Epochs or Evoked
The data to be processed. The instance is modified inplace.
include : array_like of int.
            The indices referring to columns in the unmixing matrix. The
components to be kept.
exclude : array_like of int.
            The indices referring to columns in the unmixing matrix. The
components to be zeroed out.
n_pca_components : int | float | None
The number of PCA components to be kept, either absolute (int)
or percentage of the explained variance (float). If None (default),
all PCA components will be used.
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, data will be used from the first sample.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, data will be used to the last sample.
Returns
-------
out : instance of Raw, Epochs or Evoked
The processed data.
"""
if isinstance(inst, BaseRaw):
_check_compensation_grade(self, inst, 'ICA', 'Raw',
ch_names=self.ch_names)
out = self._apply_raw(raw=inst, include=include,
exclude=exclude,
n_pca_components=n_pca_components,
start=start, stop=stop)
elif isinstance(inst, BaseEpochs):
_check_compensation_grade(self, inst, 'ICA', 'Epochs',
ch_names=self.ch_names)
out = self._apply_epochs(epochs=inst, include=include,
exclude=exclude,
n_pca_components=n_pca_components)
elif isinstance(inst, Evoked):
_check_compensation_grade(self, inst, 'ICA', 'Evoked',
ch_names=self.ch_names)
out = self._apply_evoked(evoked=inst, include=include,
exclude=exclude,
n_pca_components=n_pca_components)
else:
raise ValueError('Data input must be of Raw, Epochs or Evoked '
'type')
return out
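    # Usage sketch: apply() modifies the instance in place, so operate on
    # a copy when the original data should be preserved.
    #
    #     raw_clean = ica.apply(raw.copy(), exclude=ica.exclude)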
def _check_exclude(self, exclude):
if exclude is None:
return list(set(self.exclude))
else:
return list(set(self.exclude + exclude))
def _apply_raw(self, raw, include, exclude, n_pca_components, start, stop):
"""Aux method."""
_check_preload(raw, "ica.apply")
exclude = self._check_exclude(exclude)
if n_pca_components is not None:
self.n_pca_components = n_pca_components
start, stop = _check_start_stop(raw, start, stop)
picks = pick_types(raw.info, meg=False, include=self.ch_names,
exclude='bads', ref_meg=False)
data = raw[picks, start:stop][0]
data, _ = self._pre_whiten(data, raw.info, picks)
data = self._pick_sources(data, include, exclude)
raw[picks, start:stop] = data
return raw
def _apply_epochs(self, epochs, include, exclude, n_pca_components):
"""Aux method."""
_check_preload(epochs, "ica.apply")
exclude = self._check_exclude(exclude)
picks = pick_types(epochs.info, meg=False, ref_meg=False,
include=self.ch_names,
exclude='bads')
# special case where epochs come picked but fit was 'unpicked'.
if len(picks) != len(self.ch_names):
raise RuntimeError('Epochs don\'t match fitted data: %i channels '
'fitted but %i channels supplied. \nPlease '
'provide Epochs compatible with '
'ica.ch_names' % (len(self.ch_names),
len(picks)))
if n_pca_components is not None:
self.n_pca_components = n_pca_components
data = np.hstack(epochs.get_data()[:, picks])
data, _ = self._pre_whiten(data, epochs.info, picks)
data = self._pick_sources(data, include=include, exclude=exclude)
# restore epochs, channels, tsl order
epochs._data[:, picks] = np.array(np.split(data,
len(epochs.events), 1))
epochs.preload = True
return epochs
def _apply_evoked(self, evoked, include, exclude, n_pca_components):
"""Aux method."""
exclude = self._check_exclude(exclude)
picks = pick_types(evoked.info, meg=False, ref_meg=False,
include=self.ch_names,
exclude='bads')
# special case where evoked come picked but fit was 'unpicked'.
if len(picks) != len(self.ch_names):
raise RuntimeError('Evoked does not match fitted data: %i channels'
' fitted but %i channels supplied. \nPlease '
'provide an Evoked object that\'s compatible '
'with ica.ch_names' % (len(self.ch_names),
len(picks)))
if n_pca_components is not None:
self.n_pca_components = n_pca_components
data = evoked.data[picks]
data, _ = self._pre_whiten(data, evoked.info, picks)
data = self._pick_sources(data, include=include,
exclude=exclude)
# restore evoked
evoked.data[picks] = data
return evoked
def _pick_sources(self, data, include, exclude):
"""Aux function."""
if exclude is None:
exclude = self.exclude
else:
exclude = list(set(self.exclude + list(exclude)))
_n_pca_comp = self._check_n_pca_components(self.n_pca_components)
if not(self.n_components_ <= _n_pca_comp <= self.max_pca_components):
raise ValueError('n_pca_components must be >= '
'n_components and <= max_pca_components.')
n_components = self.n_components_
logger.info('Transforming to ICA space (%i components)' % n_components)
# Apply first PCA
if self.pca_mean_ is not None:
data -= self.pca_mean_[:, None]
sel_keep = np.arange(n_components)
if include not in (None, []):
sel_keep = np.unique(include)
elif exclude not in (None, []):
sel_keep = np.setdiff1d(np.arange(n_components), exclude)
logger.info('Zeroing out %i ICA components'
% (n_components - len(sel_keep)))
unmixing = np.eye(_n_pca_comp)
unmixing[:n_components, :n_components] = self.unmixing_matrix_
unmixing = np.dot(unmixing, self.pca_components_[:_n_pca_comp])
mixing = np.eye(_n_pca_comp)
mixing[:n_components, :n_components] = self.mixing_matrix_
mixing = np.dot(self.pca_components_[:_n_pca_comp].T, mixing)
if _n_pca_comp > n_components:
sel_keep = np.concatenate(
(sel_keep, range(n_components, _n_pca_comp)))
proj_mat = np.dot(mixing[:, sel_keep], unmixing[sel_keep, :])
data = np.dot(proj_mat, data)
if self.pca_mean_ is not None:
data += self.pca_mean_[:, None]
# restore scaling
if self.noise_cov is None: # revert standardization
data *= self.pre_whitener_
else:
data = np.dot(linalg.pinv(self.pre_whitener_, cond=1e-14), data)
return data
@verbose
def save(self, fname):
"""Store ICA solution into a fiff file.
Parameters
----------
fname : str
The absolute path of the file name to save the ICA solution into.
The file name should end with -ica.fif or -ica.fif.gz.
"""
if self.current_fit == 'unfitted':
raise RuntimeError('No fit available. Please first fit ICA')
check_fname(fname, 'ICA', ('-ica.fif', '-ica.fif.gz',
'_ica.fif', '_ica.fif.gz'))
logger.info('Writing ICA solution to %s...' % fname)
fid = start_file(fname)
try:
_write_ica(fid, self)
end_file(fid)
except Exception:
end_file(fid)
os.remove(fname)
raise
return self
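    # Usage sketch (the file name is hypothetical): the solution
    # round-trips through FIF and can be restored with read_ica().
    #
    #     ica.save('sample-ica.fif')
    #     ica = read_ica('sample-ica.fif')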
def copy(self):
"""Copy the ICA object.
Returns
-------
ica : instance of ICA
The copied object.
"""
return deepcopy(self)
@copy_function_doc_to_method_doc(plot_ica_components)
def plot_components(self, picks=None, ch_type=None, res=64, layout=None,
vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
colorbar=False, title=None, show=True, outlines='head',
contours=6, image_interp='bilinear', head_pos=None,
inst=None):
return plot_ica_components(self, picks=picks, ch_type=ch_type,
res=res, layout=layout, vmin=vmin,
vmax=vmax, cmap=cmap, sensors=sensors,
colorbar=colorbar, title=title, show=show,
outlines=outlines, contours=contours,
image_interp=image_interp,
head_pos=head_pos, inst=inst)
@copy_function_doc_to_method_doc(plot_ica_properties)
def plot_properties(self, inst, picks=None, axes=None, dB=True,
plot_std=True, topomap_args=None, image_args=None,
psd_args=None, figsize=None, show=True):
return plot_ica_properties(self, inst, picks=picks, axes=axes,
dB=dB, plot_std=plot_std,
topomap_args=topomap_args,
image_args=image_args, psd_args=psd_args,
figsize=figsize, show=show)
@copy_function_doc_to_method_doc(plot_ica_sources)
def plot_sources(self, inst, picks=None, exclude=None, start=None,
stop=None, title=None, show=True, block=False,
show_first_samp=False):
return plot_ica_sources(self, inst=inst, picks=picks, exclude=exclude,
start=start, stop=stop, title=title, show=show,
block=block, show_first_samp=show_first_samp)
@copy_function_doc_to_method_doc(plot_ica_scores)
def plot_scores(self, scores, exclude=None, labels=None, axhline=None,
title='ICA component scores', figsize=None,
show=True):
return plot_ica_scores(
ica=self, scores=scores, exclude=exclude, labels=labels,
axhline=axhline, title=title, figsize=figsize, show=show)
@copy_function_doc_to_method_doc(plot_ica_overlay)
def plot_overlay(self, inst, exclude=None, picks=None, start=None,
stop=None, title=None, show=True):
return plot_ica_overlay(self, inst=inst, exclude=exclude, picks=picks,
start=start, stop=stop, title=title, show=show)
def detect_artifacts(self, raw, start_find=None, stop_find=None,
ecg_ch=None, ecg_score_func='pearsonr',
ecg_criterion=0.1, eog_ch=None,
eog_score_func='pearsonr',
eog_criterion=0.1, skew_criterion=-1,
kurt_criterion=-1, var_criterion=0,
add_nodes=None):
"""Run ICA artifacts detection workflow.
        Note. This is still experimental and will most likely change over
        the next releases. For maximum control use the workflow exposed in
        the examples.
Hints and caveats:
- It is highly recommended to bandpass filter ECG and EOG
data and pass them instead of the channel names as ecg_ch and eog_ch
arguments.
        - Please check your results. Detection by kurtosis and variance
may be powerful but misclassification of brain signals as
noise cannot be precluded.
- Consider using shorter times for start_find and stop_find than
for start and stop. It can save you much time.
Example invocation (taking advantage of the defaults)::
            ica.detect_artifacts(raw, ecg_ch='MEG 1531', eog_ch='EOG 061')
Parameters
----------
raw : instance of Raw
Raw object to draw sources from.
start_find : int | float | None
First sample to include for artifact search. If float, data will be
interpreted as time in seconds. If None, data will be used from the
first sample.
stop_find : int | float | None
Last sample to not include for artifact search. If float, data will
be interpreted as time in seconds. If None, data will be used to
the last sample.
ecg_ch : str | ndarray | None
The `target` argument passed to ica.find_sources_raw. Either the
name of the ECG channel or the ECG time series. If None, this step
will be skipped.
ecg_score_func : str | callable
The `score_func` argument passed to ica.find_sources_raw. Either
the name of function supported by ICA or a custom function.
ecg_criterion : float | int | list-like | slice
            The indices of the sorted ECG scores. If float, sources with
scores smaller than the criterion will be dropped. Else, the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
eog_ch : list | str | ndarray | None
The `target` argument or the list of target arguments subsequently
passed to ica.find_sources_raw. Either the name of the vertical EOG
channel or the corresponding EOG time series. If None, this step
will be skipped.
eog_score_func : str | callable
The `score_func` argument passed to ica.find_sources_raw. Either
the name of function supported by ICA or a custom function.
eog_criterion : float | int | list-like | slice
            The indices of the sorted EOG scores. If float, sources with
scores smaller than the criterion will be dropped. Else, the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
skew_criterion : float | int | list-like | slice
The indices of the sorted skewness scores. If float, sources with
scores smaller than the criterion will be dropped. Else, the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
kurt_criterion : float | int | list-like | slice
            The indices of the sorted kurtosis scores. If float, sources with
scores smaller than the criterion will be dropped. Else, the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
var_criterion : float | int | list-like | slice
            The indices of the sorted variance scores. If float, sources with
scores smaller than the criterion will be dropped. Else, the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
add_nodes : list of ica_nodes
            Additional list of tuples carrying the following parameters:
(name : str, target : str | array, score_func : callable,
criterion : float | int | list-like | slice). This parameter is a
generalization of the artifact specific parameters above and has
the same structure. Example:
            add_nodes=('ECG phase lock', 'ECG 01', my_phase_lock_function, 0.5)
Returns
-------
self : instance of ICA
The ICA object with the detected artifact indices marked for
exclusion
"""
logger.info(' Searching for artifacts...')
_detect_artifacts(self, raw=raw, start_find=start_find,
stop_find=stop_find, ecg_ch=ecg_ch,
ecg_score_func=ecg_score_func,
ecg_criterion=ecg_criterion,
eog_ch=eog_ch, eog_score_func=eog_score_func,
eog_criterion=eog_criterion,
skew_criterion=skew_criterion,
kurt_criterion=kurt_criterion,
var_criterion=var_criterion,
add_nodes=add_nodes)
return self
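    # A hedged example of a full call (channel names are hypothetical; the
    # skewness/kurtosis/variance nodes are disabled here):
    #
    #     ica.detect_artifacts(raw, ecg_ch='MEG 1531', eog_ch='EOG 061',
    #                          skew_criterion=None, kurt_criterion=None,
    #                          var_criterion=None)
    #     print(ica.exclude)  # indices marked for exclusion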
@verbose
def _check_n_pca_components(self, _n_pca_comp, verbose=None):
"""Aux function."""
if isinstance(_n_pca_comp, float):
_n_pca_comp = ((self.pca_explained_variance_ /
self.pca_explained_variance_.sum()).cumsum() <=
_n_pca_comp).sum()
logger.info('Selected %i PCA components by explained '
'variance' % _n_pca_comp)
elif _n_pca_comp is None:
_n_pca_comp = self.max_pca_components
elif _n_pca_comp < self.n_components_:
_n_pca_comp = self.n_components_
return _n_pca_comp
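    # Worked example of the float branch above (numbers are hypothetical):
    # with pca_explained_variance_ = [4., 2., 1., 1.] the cumulative explained
    # variance ratio is [0.5, 0.75, 0.875, 1.0], so _n_pca_comp = 0.8 keeps
    # (cumsum <= 0.8).sum() == 2 components.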
def _check_start_stop(raw, start, stop):
"""Aux function."""
out = list()
for st in (start, stop):
if st is None:
out.append(st)
else:
try:
out.append(_ensure_int(st))
except TypeError: # not int-like
out.append(raw.time_as_index(st)[0])
return out
@verbose
def ica_find_ecg_events(raw, ecg_source, event_id=999,
tstart=0.0, l_freq=5, h_freq=35, qrs_threshold='auto',
verbose=None):
"""Find ECG peaks from one selected ICA source.
Parameters
----------
raw : instance of Raw
Raw object to draw sources from.
ecg_source : ndarray
ICA source resembling ECG to find peaks from.
event_id : int
The index to assign to found events.
tstart : float
Start detection after tstart seconds. Useful when beginning
of run is noisy.
    l_freq : float
        Low cut-off frequency in Hz.
    h_freq : float
        High cut-off frequency in Hz.
qrs_threshold : float | str
Between 0 and 1. qrs detection threshold. Can also be "auto" to
automatically choose the threshold that generates a reasonable
number of heartbeats (40-160 beats / min).
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
ecg_events : array
Events.
ch_ECG : string
Name of channel used.
average_pulse : float.
Estimated average pulse.
"""
logger.info('Using ICA source to identify heart beats')
# detecting QRS and generating event file
ecg_events = qrs_detector(raw.info['sfreq'], ecg_source.ravel(),
tstart=tstart, thresh_value=qrs_threshold,
l_freq=l_freq, h_freq=h_freq)
n_events = len(ecg_events)
ecg_events = np.c_[ecg_events + raw.first_samp, np.zeros(n_events),
event_id * np.ones(n_events)]
return ecg_events
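# Usage sketch (the chosen source index is hypothetical):
#
#     sources = ica.get_sources(raw).get_data()
#     ecg_events = ica_find_ecg_events(raw, sources[0])
#     print('%i heartbeats found' % len(ecg_events))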
@verbose
def ica_find_eog_events(raw, eog_source=None, event_id=998, l_freq=1,
h_freq=10, verbose=None):
"""Locate EOG artifacts from one selected ICA source.
Parameters
----------
raw : instance of Raw
The raw data.
eog_source : ndarray
ICA source resembling EOG to find peaks from.
event_id : int
The index to assign to found events.
l_freq : float
Low cut-off frequency in Hz.
h_freq : float
High cut-off frequency in Hz.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
eog_events : array
Events
"""
eog_events = _find_eog_events(eog_source[np.newaxis], event_id=event_id,
l_freq=l_freq, h_freq=h_freq,
sampling_rate=raw.info['sfreq'],
first_samp=raw.first_samp)
return eog_events
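# Analogous usage sketch for blinks (the source index is again hypothetical):
#
#     eog_events = ica_find_eog_events(raw, sources[2])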
def _get_target_ch(container, target):
"""Aux function."""
# auto target selection
picks = pick_channels(container.ch_names, include=[target])
ref_picks = pick_types(container.info, meg=False, eeg=False, ref_meg=True)
if len(ref_picks) > 0:
picks = list(set(picks) - set(ref_picks))
if len(picks) == 0:
raise ValueError('%s not in channel list (%s)' %
(target, container.ch_names))
return picks
def _find_sources(sources, target, score_func):
"""Aux function."""
if isinstance(score_func, string_types):
score_func = get_score_funcs().get(score_func, score_func)
if not callable(score_func):
raise ValueError('%s is not a valid score_func.' % score_func)
scores = (score_func(sources, target) if target is not None
else score_func(sources, 1))
return scores
def _ica_explained_variance(ica, inst, normalize=False):
"""Check variance accounted for by each component in supplied data.
Parameters
----------
ica : ICA
Instance of `mne.preprocessing.ICA`.
inst : Raw | Epochs | Evoked
Data to explain with ICA. Instance of Raw, Epochs or Evoked.
normalize : bool
Whether to normalize the variance.
Returns
-------
var : array
Variance explained by each component.
"""
# check if ica is ICA and whether inst is Raw or Epochs
if not isinstance(ica, ICA):
raise TypeError('first argument must be an instance of ICA.')
if not isinstance(inst, (BaseRaw, BaseEpochs, Evoked)):
raise TypeError('second argument must an instance of either Raw, '
'Epochs or Evoked.')
source_data = _get_inst_data(ica.get_sources(inst))
# if epochs - reshape to channels x timesamples
if isinstance(inst, BaseEpochs):
n_epochs, n_chan, n_samp = source_data.shape
source_data = source_data.transpose(1, 0, 2).reshape(
(n_chan, n_epochs * n_samp))
n_chan, n_samp = source_data.shape
var = np.sum(ica.mixing_matrix_ ** 2, axis=0) * np.sum(
source_data ** 2, axis=1) / (n_chan * n_samp - 1)
if normalize:
var /= var.sum()
return var
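# The formula above follows from the back-projection of each source: component
# k contributes mixing_matrix_[:, k] * source_k to the sensors, hence its
# variance is ~ sum(mixing[:, k] ** 2) * sum(source_k ** 2) / (n_chan * n_samp - 1).
# Hedged sanity check for any fitted ica/raw pair:
#
#     var = _ica_explained_variance(ica, raw, normalize=True)
#     assert np.isclose(var.sum(), 1.0)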
def _sort_components(ica, order, copy=True):
"""Change the order of components in ica solution."""
assert ica.n_components_ == len(order)
if copy:
ica = ica.copy()
# reorder components
ica.mixing_matrix_ = ica.mixing_matrix_[:, order]
ica.unmixing_matrix_ = ica.unmixing_matrix_[order, :]
# reorder labels, excludes etc.
if isinstance(order, np.ndarray):
order = list(order)
if ica.exclude:
ica.exclude = [order.index(ic) for ic in ica.exclude]
for k in ica.labels_.keys():
ica.labels_[k] = [order.index(ic) for ic in ica.labels_[k]]
return ica
def _serialize(dict_, outer_sep=';', inner_sep=':'):
"""Aux function."""
s = []
for key, value in dict_.items():
if callable(value):
value = value.__name__
elif isinstance(value, Integral):
value = int(value)
elif isinstance(value, dict):
# py35 json does not support numpy int64
for subkey, subvalue in value.items():
if isinstance(subvalue, list):
if len(subvalue) > 0:
if isinstance(subvalue[0], (int, np.integer)):
value[subkey] = [int(i) for i in subvalue]
for cls in (np.random.RandomState, Covariance):
if isinstance(value, cls):
value = cls.__name__
s.append(key + inner_sep + json.dumps(value))
return outer_sep.join(s)
def _deserialize(str_, outer_sep=';', inner_sep=':'):
"""Aux Function."""
out = {}
for mapping in str_.split(outer_sep):
k, v = mapping.split(inner_sep, 1)
vv = json.loads(v)
out[k] = vv if not isinstance(vv, text_type) else str(vv)
return out
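# Round-trip sketch for the two helpers above (keys and values hypothetical):
#
#     s = _serialize({'n_components': 25, 'method': 'fastica'})
#     # -> 'n_components:25;method:"fastica"' (ordering may vary on Python 2)
#     _deserialize(s)
#     # -> {'n_components': 25, 'method': 'fastica'}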
def _write_ica(fid, ica):
"""Write an ICA object.
Parameters
----------
fid: file
The file descriptor
ica:
The instance of ICA to write
"""
ica_init = dict(noise_cov=ica.noise_cov,
n_components=ica.n_components,
n_pca_components=ica.n_pca_components,
max_pca_components=ica.max_pca_components,
current_fit=ica.current_fit)
if ica.info is not None:
start_block(fid, FIFF.FIFFB_MEAS)
write_id(fid, FIFF.FIFF_BLOCK_ID)
if ica.info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, ica.info['meas_id'])
# Write measurement info
write_meas_info(fid, ica.info)
end_block(fid, FIFF.FIFFB_MEAS)
start_block(fid, FIFF.FIFFB_MNE_ICA)
# ICA interface params
write_string(fid, FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS,
_serialize(ica_init))
# Channel names
if ica.ch_names is not None:
write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, ica.ch_names)
# samples on fit
n_samples = getattr(ica, 'n_samples_', None)
ica_misc = {'n_samples_': (None if n_samples is None else int(n_samples)),
'labels_': getattr(ica, 'labels_', None),
'method': getattr(ica, 'method', None)}
write_string(fid, FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS,
_serialize(ica_init))
    # ICA misc params
write_string(fid, FIFF.FIFF_MNE_ICA_MISC_PARAMS,
_serialize(ica_misc))
# Whitener
write_double_matrix(fid, FIFF.FIFF_MNE_ICA_WHITENER, ica.pre_whitener_)
# PCA components_
write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_COMPONENTS,
ica.pca_components_)
# PCA mean_
write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_MEAN, ica.pca_mean_)
# PCA explained_variance_
write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR,
ica.pca_explained_variance_)
# ICA unmixing
write_double_matrix(fid, FIFF.FIFF_MNE_ICA_MATRIX, ica.unmixing_matrix_)
# Write bad components
write_int(fid, FIFF.FIFF_MNE_ICA_BADS, ica.exclude)
# Done!
end_block(fid, FIFF.FIFFB_MNE_ICA)
@verbose
def read_ica(fname, verbose=None):
"""Restore ICA solution from fif file.
Parameters
----------
fname : str
Absolute path to fif file containing ICA matrices.
The file name should end with -ica.fif or -ica.fif.gz.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
ica : instance of ICA
The ICA estimator.
"""
check_fname(fname, 'ICA', ('-ica.fif', '-ica.fif.gz',
'_ica.fif', '_ica.fif.gz'))
logger.info('Reading %s ...' % fname)
fid, tree, _ = fiff_open(fname)
try:
# we used to store bads that weren't part of the info...
info, meas = read_meas_info(fid, tree, clean_bads=True)
except ValueError:
logger.info('Could not find the measurement info. \n'
'Functionality requiring the info won\'t be'
' available.')
info = None
ica_data = dir_tree_find(tree, FIFF.FIFFB_MNE_ICA)
if len(ica_data) == 0:
ica_data = dir_tree_find(tree, 123) # Constant 123 Used before v 0.11
if len(ica_data) == 0:
fid.close()
raise ValueError('Could not find ICA data')
my_ica_data = ica_data[0]
for d in my_ica_data['directory']:
kind = d.kind
pos = d.pos
if kind == FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS:
tag = read_tag(fid, pos)
ica_init = tag.data
elif kind == FIFF.FIFF_MNE_ROW_NAMES:
tag = read_tag(fid, pos)
ch_names = tag.data
elif kind == FIFF.FIFF_MNE_ICA_WHITENER:
tag = read_tag(fid, pos)
pre_whitener = tag.data
elif kind == FIFF.FIFF_MNE_ICA_PCA_COMPONENTS:
tag = read_tag(fid, pos)
pca_components = tag.data
elif kind == FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR:
tag = read_tag(fid, pos)
pca_explained_variance = tag.data
elif kind == FIFF.FIFF_MNE_ICA_PCA_MEAN:
tag = read_tag(fid, pos)
pca_mean = tag.data
elif kind == FIFF.FIFF_MNE_ICA_MATRIX:
tag = read_tag(fid, pos)
unmixing_matrix = tag.data
elif kind == FIFF.FIFF_MNE_ICA_BADS:
tag = read_tag(fid, pos)
exclude = tag.data
elif kind == FIFF.FIFF_MNE_ICA_MISC_PARAMS:
tag = read_tag(fid, pos)
ica_misc = tag.data
fid.close()
ica_init, ica_misc = [_deserialize(k) for k in (ica_init, ica_misc)]
current_fit = ica_init.pop('current_fit')
if ica_init['noise_cov'] == Covariance.__name__:
logger.info('Reading whitener drawn from noise covariance ...')
logger.info('Now restoring ICA solution ...')
# make sure dtypes are np.float64 to satisfy fast_dot
def f(x):
return x.astype(np.float64)
ica_init = dict((k, v) for k, v in ica_init.items()
if k in _get_args(ICA.__init__))
ica = ICA(**ica_init)
ica.current_fit = current_fit
ica.ch_names = ch_names.split(':')
ica.pre_whitener_ = f(pre_whitener)
ica.pca_mean_ = f(pca_mean)
ica.pca_components_ = f(pca_components)
ica.n_components_ = unmixing_matrix.shape[0]
ica._update_ica_names()
ica.pca_explained_variance_ = f(pca_explained_variance)
ica.unmixing_matrix_ = f(unmixing_matrix)
ica.mixing_matrix_ = linalg.pinv(ica.unmixing_matrix_)
ica.exclude = [] if exclude is None else list(exclude)
ica.info = info
if 'n_samples_' in ica_misc:
ica.n_samples_ = ica_misc['n_samples_']
if 'labels_' in ica_misc:
labels_ = ica_misc['labels_']
if labels_ is not None:
ica.labels_ = labels_
if 'method' in ica_misc:
ica.method = ica_misc['method']
logger.info('Ready.')
return ica
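# Typical restore/apply sketch (the file name is hypothetical):
#
#     ica = read_ica('sample_audvis-ica.fif')
#     ica.exclude = [0, 1]              # components judged to be artifacts
#     raw_clean = ica.apply(raw.copy())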
_ica_node = namedtuple('Node', 'name target score_func criterion')
def _detect_artifacts(ica, raw, start_find, stop_find, ecg_ch, ecg_score_func,
ecg_criterion, eog_ch, eog_score_func, eog_criterion,
skew_criterion, kurt_criterion, var_criterion,
add_nodes):
"""Aux Function."""
from scipy import stats
nodes = []
if ecg_ch is not None:
nodes += [_ica_node('ECG', ecg_ch, ecg_score_func, ecg_criterion)]
if eog_ch not in [None, []]:
if not isinstance(eog_ch, list):
eog_ch = [eog_ch]
for idx, ch in enumerate(eog_ch):
nodes += [_ica_node('EOG %02d' % idx, ch, eog_score_func,
eog_criterion)]
if skew_criterion is not None:
nodes += [_ica_node('skewness', None, stats.skew, skew_criterion)]
if kurt_criterion is not None:
nodes += [_ica_node('kurtosis', None, stats.kurtosis, kurt_criterion)]
if var_criterion is not None:
nodes += [_ica_node('variance', None, np.var, var_criterion)]
if add_nodes is not None:
nodes.extend(add_nodes)
for node in nodes:
scores = ica.score_sources(raw, start=start_find, stop=stop_find,
target=node.target,
score_func=node.score_func)
if isinstance(node.criterion, float):
found = list(np.where(np.abs(scores) > node.criterion)[0])
else:
found = list(np.atleast_1d(abs(scores).argsort()[node.criterion]))
case = (len(found), _pl(found), node.name)
logger.info(' found %s artifact%s by %s' % case)
ica.exclude += found
logger.info('Artifact indices found:\n ' + str(ica.exclude).strip('[]'))
if len(set(ica.exclude)) != len(ica.exclude):
logger.info(' Removing duplicate indices...')
ica.exclude = list(set(ica.exclude))
logger.info('Ready.')
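# Note on the criterion handling above: a float criterion thresholds the
# absolute scores, while an int/slice indexes the scores sorted by absolute
# value in ascending order, e.g. (hypothetical scores):
#
#     scores = np.array([0.05, -0.9, 0.2])
#     np.abs(scores).argsort()[-1]   # -> 1, the strongest-scoring component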
@verbose
def run_ica(raw, n_components, max_pca_components=100,
n_pca_components=64, noise_cov=None,
random_state=None, picks=None, start=None, stop=None,
start_find=None, stop_find=None, ecg_ch=None,
ecg_score_func='pearsonr', ecg_criterion=0.1, eog_ch=None,
eog_score_func='pearsonr', eog_criterion=0.1, skew_criterion=-1,
kurt_criterion=-1, var_criterion=0, add_nodes=None, verbose=None,
method='fastica'):
"""Run ICA decomposition on raw data and identify artifact sources.
This function implements an automated artifact removal work flow.
Hints and caveats:
- It is highly recommended to bandpass filter ECG and EOG
data and pass them instead of the channel names as ecg_ch and eog_ch
arguments.
- Please check your results. Detection by kurtosis and variance
can be powerful but misclassification of brain signals as
noise cannot be precluded. If you are not sure set those to None.
- Consider using shorter times for start_find and stop_find than
for start and stop. It can save you much time.
Example invocation (taking advantage of defaults)::
ica = run_ica(raw, n_components=.9, start_find=10000, stop_find=12000,
ecg_ch='MEG 1531', eog_ch='EOG 061')
Parameters
----------
raw : instance of Raw
The raw data to decompose.
n_components : int | float | None
        The number of components used for ICA decomposition. If int, it must be
        smaller than max_pca_components. If None, all PCA components will be
        used. If float between 0 and 1, components will be selected by the
        cumulative percentage of explained variance.
max_pca_components : int | None
The number of components used for PCA decomposition. If None, no
dimension reduction will be applied and max_pca_components will equal
the number of channels supplied on decomposing data.
    n_pca_components : int | float | None
The number of PCA components used after ICA recomposition. The ensuing
attribute allows to balance noise reduction against potential loss of
features due to dimensionality reduction. If greater than
``self.n_components_``, the next ``'n_pca_components'`` minus
``'n_components_'`` PCA components will be added before restoring the
        sensor space data. The attribute gets updated each time the corresponding
        parameter of .pick_sources_raw or .pick_sources_epochs is changed.
noise_cov : None | instance of mne.cov.Covariance
Noise covariance used for whitening. If None, channels are just
z-scored.
random_state : None | int | instance of np.random.RandomState
np.random.RandomState to initialize the FastICA estimation.
As the estimation is non-deterministic it can be useful to
fix the seed to have reproducible results.
picks : array-like of int
Channels to be included. This selection remains throughout the
initialized ICA solution. If None only good data channels are used.
start : int | float | None
First sample to include for decomposition. If float, data will be
interpreted as time in seconds. If None, data will be used from the
first sample.
stop : int | float | None
Last sample to not include for decomposition. If float, data will be
interpreted as time in seconds. If None, data will be used to the
last sample.
start_find : int | float | None
First sample to include for artifact search. If float, data will be
interpreted as time in seconds. If None, data will be used from the
first sample.
stop_find : int | float | None
Last sample to not include for artifact search. If float, data will be
interpreted as time in seconds. If None, data will be used to the last
sample.
ecg_ch : str | ndarray | None
The ``target`` argument passed to ica.find_sources_raw. Either the
name of the ECG channel or the ECG time series. If None, this step
will be skipped.
ecg_score_func : str | callable
The ``score_func`` argument passed to ica.find_sources_raw. Either
the name of function supported by ICA or a custom function.
ecg_criterion : float | int | list-like | slice
        The indices of the sorted ECG scores. If float, sources with
scores smaller than the criterion will be dropped. Else, the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
eog_ch : list | str | ndarray | None
The ``target`` argument or the list of target arguments subsequently
passed to ica.find_sources_raw. Either the name of the vertical EOG
channel or the corresponding EOG time series. If None, this step
will be skipped.
eog_score_func : str | callable
The ``score_func`` argument passed to ica.find_sources_raw. Either
the name of function supported by ICA or a custom function.
eog_criterion : float | int | list-like | slice
        The indices of the sorted EOG scores. If float, sources with
scores smaller than the criterion will be dropped. Else, the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
skew_criterion : float | int | list-like | slice
The indices of the sorted skewness scores. If float, sources with
scores smaller than the criterion will be dropped. Else, the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
kurt_criterion : float | int | list-like | slice
        The indices of the sorted kurtosis scores. If float, sources with
scores smaller than the criterion will be dropped. Else, the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
var_criterion : float | int | list-like | slice
        The indices of the sorted variance scores. If float, sources with
scores smaller than the criterion will be dropped. Else, the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
add_nodes : list of ica_nodes
        Additional list of tuples carrying the following parameters:
(name : str, target : str | array, score_func : callable,
criterion : float | int | list-like | slice). This parameter is a
generalization of the artifact specific parameters above and has
the same structure. Example::
            add_nodes=('ECG phase lock', 'ECG 01', my_phase_lock_function, 0.5)
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
method : {'fastica', 'picard'}
The ICA method to use. Defaults to 'fastica'.
Returns
-------
ica : instance of ICA
The ICA object with detected artifact sources marked for exclusion.
"""
ica = ICA(n_components=n_components, max_pca_components=max_pca_components,
n_pca_components=n_pca_components, method=method,
noise_cov=noise_cov, random_state=random_state, verbose=verbose)
ica.fit(raw, start=start, stop=stop, picks=picks)
logger.info('%s' % ica)
logger.info(' Now searching for artifacts...')
_detect_artifacts(ica=ica, raw=raw, start_find=start_find,
stop_find=stop_find, ecg_ch=ecg_ch,
ecg_score_func=ecg_score_func,
ecg_criterion=ecg_criterion, eog_ch=eog_ch,
eog_score_func=eog_score_func,
eog_criterion=eog_criterion,
skew_criterion=skew_criterion,
kurt_criterion=kurt_criterion,
var_criterion=var_criterion,
add_nodes=add_nodes)
return ica
@verbose
def _band_pass_filter(ica, sources, target, l_freq, h_freq, verbose=None):
"""Optionally band-pass filter the data."""
if l_freq is not None and h_freq is not None:
logger.info('... filtering ICA sources')
# use FIR here, steeper is better
kw = dict(phase='zero-double', filter_length='10s', fir_window='hann',
l_trans_bandwidth=0.5, h_trans_bandwidth=0.5,
fir_design='firwin2')
sources = filter_data(sources, ica.info['sfreq'], l_freq, h_freq, **kw)
logger.info('... filtering target')
target = filter_data(target, ica.info['sfreq'], l_freq, h_freq, **kw)
elif l_freq is not None or h_freq is not None:
raise ValueError('Must specify both pass bands')
return sources, target
# #############################################################################
# CORRMAP
def _find_max_corrs(all_maps, target, threshold):
"""Compute correlations between template and target components."""
all_corrs = [compute_corr(target, subj.T) for subj in all_maps]
abs_corrs = [np.abs(a) for a in all_corrs]
corr_polarities = [np.sign(a) for a in all_corrs]
if threshold <= 1:
max_corrs = [list(np.nonzero(s_corr > threshold)[0])
for s_corr in abs_corrs]
else:
max_corrs = [list(find_outliers(s_corr, threshold=threshold))
for s_corr in abs_corrs]
am = [l[i] for l, i_s in zip(abs_corrs, max_corrs)
for i in i_s]
median_corr_with_target = np.median(am) if len(am) > 0 else 0
polarities = [l[i] for l, i_s in zip(corr_polarities, max_corrs)
for i in i_s]
maxmaps = [l[i] for l, i_s in zip(all_maps, max_corrs)
for i in i_s]
if len(maxmaps) == 0:
return [], 0, 0, []
newtarget = np.zeros(maxmaps[0].size)
std_of_maps = np.std(np.asarray(maxmaps))
    mean_of_maps = np.mean(np.asarray(maxmaps))
for maxmap, polarity in zip(maxmaps, polarities):
newtarget += (maxmap / std_of_maps - mean_of_maps) * polarity
newtarget /= len(maxmaps)
newtarget *= std_of_maps
sim_i_o = np.abs(np.corrcoef(target, newtarget)[1, 0])
return newtarget, median_corr_with_target, sim_i_o, max_corrs
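# The template returned above is a polarity-aligned, standardized average of
# all maps exceeding the threshold; ``sim_i_o`` measures how similar this new
# template is to the original target and is used in the first pass of
# ``corrmap`` to pick the best threshold.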
@verbose
def corrmap(icas, template, threshold="auto", label=None, ch_type="eeg",
plot=True, show=True, verbose=None, outlines='head', layout=None,
sensors=True, contours=6, cmap=None):
"""Find similar Independent Components across subjects by map similarity.
Corrmap (Viola et al. 2009 Clin Neurophysiol) identifies the best group
match to a supplied template. Typically, feed it a list of fitted ICAs and
a template IC, for example, the blink for the first subject, to identify
specific ICs across subjects.
The specific procedure consists of two iterations. In a first step, the
maps best correlating with the template are identified. In the next step,
the analysis is repeated with the mean of the maps identified in the first
stage.
    Run with `plot` and `show` set to `True` and `label=None` to find
    good parameters. Then, run with labelling enabled to apply the
    labelling to the IC objects. (Running with both `plot` and `label`
    off does nothing.)
Outputs a list of fitted ICAs with the indices of the marked ICs in a
specified field.
The original Corrmap website: www.debener.de/corrmap/corrmapplugin1.html
Parameters
----------
icas : list of mne.preprocessing.ICA
A list of fitted ICA objects.
template : tuple | np.ndarray, shape (n_components,)
Either a tuple with two elements (int, int) representing the list
indices of the set from which the template should be chosen, and the
template. E.g., if template=(1, 0), the first IC of the 2nd ICA object
is used.
Or a numpy array whose size corresponds to each IC map from the
supplied maps, in which case this map is chosen as the template.
threshold : "auto" | list of float | float
Correlation threshold for identifying ICs
If "auto", search for the best map by trying all correlations between
0.6 and 0.95. In the original proposal, lower values are considered,
but this is not yet implemented.
If list of floats, search for the best map in the specified range of
        correlation strengths. As correlation values, these must be between 0 and 1.
If float > 0, select ICs correlating better than this.
If float > 1, use find_outliers to identify ICs within subjects (not in
original Corrmap)
Defaults to "auto".
label : None | str
If not None, categorised ICs are stored in a dictionary ``labels_``
under the given name. Preexisting entries will be appended to
(excluding repeats), not overwritten. If None, a dry run is performed
and the supplied ICs are not changed.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
The channel type to plot. Defaults to 'eeg'.
plot : bool
Should constructed template and selected maps be plotted? Defaults
to True.
show : bool
Show figures if True.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
outlines : 'head' | dict | None
The outlines to be drawn. If 'head', a head scheme will be drawn. If
dict, each key refers to a tuple of x and y positions. The values in
'mask_pos' will serve as image mask. If None, nothing will be drawn.
Defaults to 'head'. If dict, the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside the
outline. Moreover, a matplotlib patch object can be passed for
advanced masking options, either directly or as a function that returns
patches (required for multi-axis plots).
layout : None | Layout | list of Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). Or a list of Layout if projections
are from different sensor types.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+' for red plusses). If True, a circle will be
used (via .add_artist). Defaults to True.
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be drawn.
When an integer, matplotlib ticker locator is used to find suitable
values for the contour thresholds (may sometimes be inaccurate, use
array for accuracy). If an array, the values represent the levels for
the contours. Defaults to 6.
cmap : None | matplotlib colormap
Colormap for the plot. If ``None``, defaults to 'Reds_r' for norm data,
otherwise to 'RdBu_r'.
Returns
-------
template_fig : fig
Figure showing the template.
labelled_ics : fig
Figure showing the labelled ICs in all ICA decompositions.
"""
if not isinstance(plot, bool):
raise ValueError("`plot` must be of type `bool`")
if threshold == 'auto':
threshold = np.arange(60, 95, dtype=np.float64) / 100.
all_maps = [ica.get_components().T for ica in icas]
# check if template is an index to one IC in one ICA object, or an array
if len(template) == 2:
target = all_maps[template[0]][template[1]]
is_subject = True
elif template.ndim == 1 and len(template) == all_maps[0].shape[1]:
target = template
is_subject = False
else:
raise ValueError("`template` must be a length-2 tuple or an array the "
"size of the ICA maps.")
template_fig, labelled_ics = None, None
if plot is True:
if is_subject: # plotting from an ICA object
ttl = 'Template from subj. {0}'.format(str(template[0]))
template_fig = icas[template[0]].plot_components(
picks=template[1], ch_type=ch_type, title=ttl,
outlines=outlines, cmap=cmap, contours=contours, layout=layout,
show=show)
else: # plotting an array
template_fig = _plot_corrmap([template], [0], [0], ch_type,
icas[0].copy(), "Template",
outlines=outlines, cmap=cmap,
contours=contours, layout=layout,
show=show, template=True)
template_fig.subplots_adjust(top=0.8)
template_fig.canvas.draw()
# first run: use user-selected map
if isinstance(threshold, (int, float)):
if len(all_maps) == 0:
logger.info('No component detected using find_outliers.'
' Consider using threshold="auto"')
return icas
nt, mt, s, mx = _find_max_corrs(all_maps, target, threshold)
elif len(threshold) > 1:
paths = [_find_max_corrs(all_maps, target, t) for t in threshold]
# find iteration with highest avg correlation with target
nt, mt, s, mx = paths[np.argmax([path[2] for path in paths])]
# second run: use output from first run
if isinstance(threshold, (int, float)):
if len(all_maps) == 0 or len(nt) == 0:
if threshold > 1:
logger.info('No component detected using find_outliers. '
'Consider using threshold="auto"')
return icas
nt, mt, s, mx = _find_max_corrs(all_maps, nt, threshold)
elif len(threshold) > 1:
paths = [_find_max_corrs(all_maps, nt, t) for t in threshold]
# find iteration with highest avg correlation with target
nt, mt, s, mx = paths[np.argmax([path[1] for path in paths])]
allmaps, indices, subjs, nones = [list() for _ in range(4)]
logger.info('Median correlation with constructed map: %0.3f' % mt)
if plot is True:
logger.info('Displaying selected ICs per subject.')
for ii, (ica, max_corr) in enumerate(zip(icas, mx)):
if len(max_corr) > 0:
if isinstance(max_corr[0], np.ndarray):
max_corr = max_corr[0]
if label is not None:
ica.labels_[label] = list(set(list(max_corr) +
ica.labels_.get(label, list())))
if plot is True:
allmaps.extend(ica.get_components()[:, max_corr].T)
subjs.extend([ii] * len(max_corr))
indices.extend(max_corr)
else:
if (label is not None) and (label not in ica.labels_):
ica.labels_[label] = list()
nones.append(ii)
if len(nones) == 0:
logger.info('At least 1 IC detected for each subject.')
else:
logger.info('No maps selected for subject(s) ' +
', '.join([str(x) for x in nones]) +
', consider a more liberal threshold.')
if plot is True:
labelled_ics = _plot_corrmap(allmaps, subjs, indices, ch_type, ica,
label, outlines=outlines, cmap=cmap,
contours=contours, layout=layout,
show=show)
return template_fig, labelled_ics
else:
return None
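# Corrmap usage sketch (template indices and file names are hypothetical):
#
#     icas = [read_ica(fname) for fname in ica_fnames]
#     corrmap(icas, template=(0, 3), threshold='auto', label='blink',
#             ch_type='eeg', plot=False)
#     # each fitted ICA now carries icas[i].labels_['blink']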
| bsd-3-clause |
Karel-van-de-Plassche/bokeh | bokeh/core/json_encoder.py | 2 | 7205 | ''' Provide functions and classes to implement a custom JSON encoder for
serializing objects for BokehJS.
The primary interface is provided by the |serialize_json| function, which
uses the custom |BokehJSONEncoder| to produce JSON output.
In general, functions in this module convert values in the following way:
* Datetime values (Python, Pandas, NumPy) are converted to floating point
milliseconds since epoch.
* TimeDelta values are converted to absolute floating point milliseconds.
* RelativeDelta values are converted to dictionaries.
* Decimal values are converted to floating point.
* Sequences (Pandas Series, NumPy arrays, python sequences) that are passed
  through this interface are converted to lists. Note, however, that arrays in
data sources inside Bokeh Documents are converted elsewhere, and by default
use a binary encoded format.
* Bokeh ``Model`` instances are usually serialized elsewhere in the context
  of an entire Bokeh Document. Models passed through this interface are
converted to references.
* ``HasProps`` (that are not Bokeh models) are converted to key/value dicts of
all their properties and values.
* ``Color`` instances are converted to CSS color values.
.. |serialize_json| replace:: :class:`~bokeh.core.json_encoder.serialize_json`
.. |BokehJSONEncoder| replace:: :class:`~bokeh.core.json_encoder.BokehJSONEncoder`
'''
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
import collections
import decimal
import json
import numpy as np
from ..settings import settings
from ..util.dependencies import import_optional
from ..util.serialization import convert_datetime_type, is_datetime_type, transform_series, transform_array
pd = import_optional('pandas')
rd = import_optional("dateutil.relativedelta")
class BokehJSONEncoder(json.JSONEncoder):
''' A custom ``json.JSONEncoder`` subclass for encoding objects in
accordance with the BokehJS protocol.
'''
def transform_python_types(self, obj):
''' Handle special scalars such as (Python, NumPy, or Pandas)
datetimes, or Decimal values.
Args:
obj (obj) :
The object to encode. Anything not specifically handled in
this method is passed on to the default system JSON encoder.
'''
# date/time values that get serialized as milliseconds
if is_datetime_type(obj):
return convert_datetime_type(obj)
# slice objects
elif isinstance(obj, slice):
return dict(start=obj.start, stop=obj.stop, step=obj.step)
# NumPy scalars
elif np.issubdtype(type(obj), np.floating):
return float(obj)
elif np.issubdtype(type(obj), np.integer):
return int(obj)
elif np.issubdtype(type(obj), np.bool_):
return bool(obj)
# Decimal values
elif isinstance(obj, decimal.Decimal):
return float(obj)
# RelativeDelta gets serialized as a dict
elif rd and isinstance(obj, rd.relativedelta):
return dict(years=obj.years,
months=obj.months,
days=obj.days,
hours=obj.hours,
minutes=obj.minutes,
seconds=obj.seconds,
microseconds=obj.microseconds)
else:
return super(BokehJSONEncoder, self).default(obj)
def default(self, obj):
''' The required ``default`` method for JSONEncoder subclasses.
Args:
obj (obj) :
The object to encode. Anything not specifically handled in
this method is passed on to the default system JSON encoder.
'''
from ..model import Model
from ..colors import Color
from .has_props import HasProps
# array types -- use force_list here, only binary
# encoding CDS columns for now
if pd and isinstance(obj, (pd.Series, pd.Index)):
return transform_series(obj, force_list=True)
elif isinstance(obj, np.ndarray):
return transform_array(obj, force_list=True)
elif isinstance(obj, collections.deque):
return list(map(self.default, obj))
elif isinstance(obj, Model):
return obj.ref
elif isinstance(obj, HasProps):
return obj.properties_with_values(include_defaults=False)
elif isinstance(obj, Color):
return obj.to_css()
else:
return self.transform_python_types(obj)
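# A small illustration of the conversions above (values are hypothetical):
#
#     json.dumps({'x': np.int64(3), 'y': decimal.Decimal('1.5')},
#                cls=BokehJSONEncoder)
#     # -> '{"x": 3, "y": 1.5}'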
def serialize_json(obj, pretty=None, indent=None, **kwargs):
''' Return a serialized JSON representation of objects, suitable to
send to BokehJS.
This function is typically used to serialize single python objects in
the manner expected by BokehJS. In particular, many datetime values are
automatically normalized to an expected format. Some Bokeh objects can
also be passed, but note that Bokeh models are typically properly
serialized in the context of an entire Bokeh document.
    The resulting JSON always has sorted keys. By default, the output is
as compact as possible unless pretty output or indentation is requested.
Args:
obj (obj) : the object to serialize to JSON format
pretty (bool, optional) :
Whether to generate prettified output. If ``True``, spaces are
            added after separators, and indentation and newlines
are applied. (default: False)
Pretty output can also be enabled with the environment variable
``BOKEH_PRETTY``, which overrides this argument, if set.
indent (int or None, optional) :
Amount of indentation to use in generated JSON output. If ``None``
then no indentation is used, unless pretty output is enabled,
in which case two spaces are used. (default: None)
Any additional keyword arguments are passed to ``json.dumps``, except for
some that are computed internally, and cannot be overridden:
* allow_nan
* indent
* separators
* sort_keys
Examples:
.. code-block:: python
>>> data = dict(b=np.datetime64('2017-01-01'), a = np.arange(3))
            >>> print(serialize_json(data))
{"a":[0,1,2],"b":1483228800000.0}
>>> print(serialize_json(data, pretty=True))
{
"a": [
0,
1,
2
],
"b": 1483228800000.0
}
'''
# these args to json.dumps are computed internally and should not be passed along
for name in ['allow_nan', 'separators', 'sort_keys']:
if name in kwargs:
raise ValueError("The value of %r is computed internally, overriding is not permissable." % name)
if pretty is None:
pretty = settings.pretty(False)
if pretty:
separators=(",", ": ")
else:
separators=(",", ":")
if pretty and indent is None:
indent = 2
return json.dumps(obj, cls=BokehJSONEncoder, allow_nan=False, indent=indent, separators=separators, sort_keys=True, **kwargs)
| bsd-3-clause |
olologin/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set, and so are the
corresponding Mahalanobis distances. It is better to use a robust
estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standards estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications are outliers detection,
observations ranking, clustering, ...
For visualization purposes, the cube root of the Mahalanobis distances
is represented in the boxplot, as Wilson and Hilferty suggest [2].
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| bsd-3-clause |
Barmaley-exe/scikit-learn | examples/manifold/plot_lle_digits.py | 181 | 8510 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing LDA projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = lda.LDA(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
Caoimhinmg/PmagPy | programs/conversion_scripts2/jr6_jr6_magic2.py | 1 | 11779 | #!/usr/bin/env python
from __future__ import print_function
from builtins import str
from builtins import range
import pandas as pd
import sys
import numpy as np
import pmagpy.pmag as pmag
def main(command_line=True, **kwargs):
"""
NAME
jr6_jr6_magic.py
DESCRIPTION
converts JR6 .jr6 format files to magic_measurements format files
SYNTAX
jr6_jr6_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input file, or
-F FILE: specify output file, default is magic_measurements.txt
-Fsa: specify er_samples format file for appending, default is new er_samples.txt (Not working yet)
-spc NUM : specify number of characters to designate a specimen, default = 1
-loc LOCNAME : specify location/study name
-A: don't average replicate measurements
        -ncn NCON: specify sample naming convention (convention 6 not yet implemented)
-mcd [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
-JR IODP samples measured on the JOIDES RESOLUTION
-v NUM : specify the volume in cc of the sample, default 2.5^3cc
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
        [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
        NB: all others you will have to customize yourself
or e-mail [email protected] for help.
INPUT
JR6 .jr6 format file
"""
# initialize some stuff
noave=0
#volume=2.5**3 #default volume is a 2.5cm cube
volume = 2.5 * 1e-6 #default volume is a 2.5 cm cube, translated to meters cubed
inst=""
samp_con,Z='1',""
missing=1
demag="N"
er_location_name="unknown"
citation='This study'
args=sys.argv
meth_code="LP-NO"
specnum=1
version_num=pmag.get_version()
Samps=[] # keeps track of sample orientations
user=""
mag_file=""
dir_path='.'
MagRecs=[]
ErSamps=[]
SampOuts=[]
samp_file = 'er_samples.txt'
meas_file = 'magic_measurements.txt'
tmp_file= "fixed.jr6"
meth_code,JR="",0
#
# get command line arguments
#
if command_line:
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-ID' in sys.argv:
ind = sys.argv.index('-ID')
input_dir_path = sys.argv[ind+1]
else:
input_dir_path = dir_path
output_dir_path = dir_path
if "-h" in args:
print(main.__doc__)
return False
if '-F' in args:
ind=args.index("-F")
meas_file = args[ind+1]
if '-Fsa' in args:
ind = args.index("-Fsa")
samp_file = args[ind+1]
#try:
# open(samp_file,'r')
# ErSamps,file_type=pmag.magic_read(samp_file)
# print 'sample information will be appended to ', samp_file
#except:
# print samp_file,' not found: sample information will be stored in new er_samples.txt file'
# samp_file = output_dir_path+'/er_samples.txt'
if '-f' in args:
ind = args.index("-f")
mag_file= args[ind+1]
if "-spc" in args:
ind = args.index("-spc")
specnum = int(args[ind+1])
if "-ncn" in args:
ind=args.index("-ncn")
samp_con=sys.argv[ind+1]
if "-loc" in args:
ind=args.index("-loc")
er_location_name=args[ind+1]
if "-A" in args: noave=1
if "-mcd" in args:
ind=args.index("-mcd")
meth_code=args[ind+1]
if "-JR" in args:
meth_code=meth_code+":FS-C-DRILL-IODP:SP-SS-C:SO-V"
meth_code=meth_code.strip(":")
JR=1
samp_con='5'
if "-v" in args:
ind=args.index("-v")
volume=float(args[ind+1])*1e-6 # enter volume in cc, convert to m^3
if not command_line:
dir_path = kwargs.get('dir_path', '.')
input_dir_path = kwargs.get('input_dir_path', dir_path)
output_dir_path = dir_path
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
mag_file = kwargs.get('mag_file')
samp_file = kwargs.get('samp_file', 'er_samples.txt')
specnum = kwargs.get('specnum', 1)
samp_con = kwargs.get('samp_con', '1')
er_location_name = kwargs.get('er_location_name', '')
noave = kwargs.get('noave', 0) # default (0) means DO average
meth_code = kwargs.get('meth_code', "LP-NO")
volume = float(kwargs.get('volume', 0))
if not volume:
volume = 2.5 * 1e-6 #default volume is a 2.5 cm cube, translated to meters cubed
else:
#convert cm^3 to m^3
volume *= 1e-6
JR = kwargs.get('JR', 0)
if JR:
if meth_code == "LP-NO":
meth_code = ""
meth_code=meth_code+":FS-C-DRILL-IODP:SP-SS-C:SO-V"
meth_code=meth_code.strip(":")
samp_con='5'
# format variables
mag_file = input_dir_path+"/" + mag_file
meas_file = output_dir_path+"/" + meas_file
samp_file = output_dir_path+"/" + samp_file
tmp_file = output_dir_path+"/" + tmp_file
if specnum!=0:
specnum=-specnum
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
return False, "option [4] must be in form 4-Z where Z is an integer"
else:
Z=samp_con.split("-")[1]
samp_con="4"
if "7" in samp_con:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
return False, "option [7] must be in form 7-Z where Z is an integer"
else:
Z=samp_con.split("-")[1]
samp_con="7"
ErSampRec,ErSiteRec={},{}
# parse data
# fix .jr6 file so that there are spaces between all the columns.
pre_data=open(mag_file, 'r')
tmp_data=open(tmp_file, 'w')
line=pre_data.readline()
while line !='':
line=line.replace('-',' -')
#print "line=", line
tmp_data.write(line)
line=pre_data.readline()
tmp_data.close()
pre_data.close()
data=pd.read_csv(tmp_file, delim_whitespace=True,header=None)
if JR==0: #
data.columns=['er_specimen_name','step','x','y','z','expon','sample_azimuth','sample_dip', 'sample_bed_dip_direction','sample_bed_dip','bed_dip_dir2','bed_dip2','param1','param2','param3','param4','measurement_csd']
cart=np.array([data['x'],data['y'],data['z']]).transpose()
else: # measured on the Joides Resolution JR6
data.columns=['er_specimen_name','step','negz','y','x','expon','sample_azimuth','sample_dip', 'sample_bed_dip_direction','sample_bed_dip','bed_dip_dir2','bed_dip2','param1','param2','param3','param4','measurement_csd']
cart=np.array([data['x'],data['y'],-data['negz']]).transpose()
dir= pmag.cart2dir(cart).transpose()
data['measurement_dec']=dir[0]
data['measurement_inc']=dir[1]
data['measurement_magn_moment']=dir[2]*(10.0**data['expon'])*volume # the data are in A/m - this converts to Am^2
data['measurement_magn_volume']=dir[2]*(10.0**data['expon']) # A/m - data in A/m
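    # (moment in Am^2 = volume-normalized magnetization in A/m * specimen volume in m^3)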
data['sample_dip']=-data['sample_dip']
DGEOs,IGEOs=[],[]
for ind in range(len(data)):
dgeo,igeo=pmag.dogeo(data.ix[ind]['measurement_dec'],data.ix[ind]['measurement_inc'],data.ix[ind]['sample_azimuth'],data.ix[ind]['sample_dip'])
DGEOs.append(dgeo)
IGEOs.append(igeo)
data['specimen_dec']=DGEOs
data['specimen_inc']=IGEOs
data['specimen_tilt']='1'
if specnum!=0:
        data['er_sample_name']=data['er_specimen_name'].str[:specnum] # strip the specimen suffix to get the sample name
else:
data['er_sample_name']=data['er_specimen_name']
if int(samp_con) in [1, 2, 3, 4, 5, 7]:
data['er_site_name']=pmag.parse_site(data['er_sample_name'],samp_con,Z)
# else:
# if 'er_site_name' in ErSampRec.keys():er_site_name=ErSampRec['er_site_name']
# if 'er_location_name' in ErSampRec.keys():er_location_name=ErSampRec['er_location_name']
# Configure the er_sample table
for rowNum, row in data.iterrows():
sampleFlag=0
for sampRec in SampOuts:
if sampRec['er_sample_name'] == row['er_sample_name']:
sampleFlag=1
break
if sampleFlag == 0:
ErSampRec['er_sample_name']=row['er_sample_name']
ErSampRec['sample_azimuth']=str(row['sample_azimuth'])
ErSampRec['sample_dip']=str(row['sample_dip'])
ErSampRec['magic_method_codes']=meth_code
ErSampRec['er_location_name']=er_location_name
ErSampRec['er_site_name']=row['er_site_name']
ErSampRec['er_citation_names']='This study'
SampOuts.append(ErSampRec.copy())
# Configure the magic_measurements table
for rowNum, row in data.iterrows():
MagRec={}
# MagRec['measurement_description']='Date: '+date
MagRec["er_citation_names"]="This study"
MagRec['er_location_name']=er_location_name
MagRec['er_site_name']=row['er_site_name']
MagRec['er_sample_name']=row['er_sample_name']
MagRec['magic_software_packages']=version_num
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_flag"]='g'
MagRec["measurement_standard"]='u'
MagRec["measurement_number"]='1'
MagRec["er_specimen_name"]=row['er_specimen_name']
MagRec["treatment_ac_field"]='0'
if row['step'] == 'NRM':
meas_type="LT-NO"
elif row['step'][0:2] == 'AD':
meas_type="LT-AF-Z"
treat=float(row['step'][2:])
MagRec["treatment_ac_field"]='%8.3e' %(treat*1e-3) # convert from mT to tesla
        elif row['step'][0:2] == 'TD':
meas_type="LT-T-Z"
treat=float(row['step'][2:])
MagRec["treatment_temp"]='%8.3e' % (treat+273.) # temp in kelvin
else: # need to add IRM, and ARM options
print("measurement type unknown", row['step'])
return False, "measurement type unknown"
MagRec["measurement_magn_moment"]=str(row['measurement_magn_moment'])
MagRec["measurement_magn_volume"]=str(row['measurement_magn_volume'])
MagRec["measurement_dec"]=str(row['measurement_dec'])
MagRec["measurement_inc"]=str(row['measurement_inc'])
MagRec['magic_method_codes']=meas_type
MagRecs.append(MagRec.copy())
pmag.magic_write(samp_file,SampOuts,'er_samples')
print("sample orientations put in ",samp_file)
MagOuts=pmag.measurements_methods(MagRecs,noave)
pmag.magic_write(meas_file,MagOuts,'magic_measurements')
print("results put in ",meas_file)
print("exit!")
return True, meas_file
def do_help():
return main.__doc__
if __name__ == "__main__":
main()
| bsd-3-clause |
3manuek/scikit-learn | sklearn/datasets/tests/test_lfw.py | 230 | 7880 | """This test for the LFW requires medium-size data downloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
    # The data is cropped around the center with a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
    # the target is an array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
    # The data is cropped around the center with a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
mikebenfield/scikit-learn | benchmarks/bench_plot_approximate_neighbors.py | 244 | 6011 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, the accuracy of LSHForest queries is measured for various
hyper-parameters and index sizes.
Second, the speed up of LSHForest queries compared to the brute force
method for exact nearest neighbors is measured for the
aforementioned settings. In general, the speed up increases as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
for color in colors]
legend_labels = ['n_estimators={n_estimators}, '
'n_candidates={n_candidates}'.format(**p)
for p in params_list]
# Plot precision
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
| bsd-3-clause |
loli/semisupervisedforests | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
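# In this older scikit-learn API, GMM.score_samples returns a (log-likelihood,
# responsibilities) tuple; negating element [0] gives the negative log-likelihood
# surface that is contoured below.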
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
Sapphirine/Trading-Using-Nonparametric-Time-Series-Classification-Models | logistic.py | 1 | 2116 |
# coding: utf-8
# In[1]:
from numpy import genfromtxt
import numpy as np
import datetime as datatime
my_data = genfromtxt("C:\\Users\\Yufan\\Google Drive\\Interviews\\Interviews Github\\BitCoinTotalReturnSwapStrategy\\bitfinexUSD.csv\\.bitfinexUSD.csv", delimiter =',')
# In[2]:
print my_data.shape
my_data_cut = my_data
# In[3]:
import sklearn.linear_model as linear
# In[7]:
L = my_data.shape[0]
n_training_sample_size = int(L*.9) - 11
print n_training_sample_size
n_testing_sample_size = L - n_training_sample_size -11
n_testing_sample_size
# In[12]:
N = n_training_sample_size
training_data = np.zeros((N,11))
print training_data.shape
for i in xrange(N):
for j in xrange(10):
training_data[i,j] = my_data_cut[i-j+10,1]/my_data_cut[i-j+9,1] #from index 11
if (my_data_cut[i+11,1] > my_data_cut[i+10, 1]):
training_data[i,10] = 1
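# Each training row holds the 10 most recent consecutive price ratios
# (price[t]/price[t-1]); column 10 is the label, set to 1 when the next
# price tick is higher than the current one and left at 0 otherwise.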
N = n_testing_sample_size
testing_target = np.zeros((N,1))-1
testing_data = np.zeros((N,10))
reset = n_training_sample_size
print reset
for i in xrange(N):
for j in xrange(10):
testing_data[i,j] = my_data_cut[reset+i-j+10,1]/my_data_cut[reset+i-j+9,1]
if (my_data_cut[i+reset+11,1] > my_data_cut[i+reset+10, 1]):
testing_target[i,0] = 1
else:
testing_target[i,0] = 0
# In[9]:
logistic_regression_model = linear.LogisticRegression()
training_data.shape
# In[10]:
logistic_regression_model.fit(training_data[:,0:-1],training_data[:,10:11])
# In[16]:
beta = logistic_regression_model.coef_
beta_0 = logistic_regression_model.intercept_
predicted_value = logistic_regression_model.predict_proba(testing_data)
hit = 0
for i in xrange(len(predicted_value)):
if predicted_value[i,1] > 0.5 and testing_target[i,0] == 1 or predicted_value[i,1] < 0.5 and testing_target[i,0] == 0:
hit +=1
print hit
print len(predicted_value)
print len(testing_target)
print beta
print beta_0
print predicted_value
# In[15]:
nd = my_data_cut
print nd
# In[17]:
predicted_v = logistic_regression_model.score(testing_data,testing_target)
# In[18]:
predicted_v
| apache-2.0 |
GuessWhoSamFoo/pandas | pandas/tests/reshape/merge/test_merge_asof.py | 2 | 40511 | import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import Timedelta, merge_asof, read_csv, to_datetime
from pandas.core.reshape.merge import MergeError
from pandas.util.testing import assert_frame_equal
class TestAsOfMerge(object):
def read_data(self, datapath, name, dedupe=False):
path = datapath('reshape', 'merge', 'data', name)
x = read_csv(path)
if dedupe:
x = (x.drop_duplicates(['time', 'ticker'], keep='last')
.reset_index(drop=True)
)
x.time = to_datetime(x.time)
return x
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.trades = self.read_data(datapath, 'trades.csv')
self.quotes = self.read_data(datapath, 'quotes.csv', dedupe=True)
self.asof = self.read_data(datapath, 'asof.csv')
self.tolerance = self.read_data(datapath, 'tolerance.csv')
self.allow_exact_matches = self.read_data(datapath,
'allow_exact_matches.csv')
self.allow_exact_matches_and_tolerance = self.read_data(
datapath, 'allow_exact_matches_and_tolerance.csv')
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
'right_val': [1, 2, 3, 6, 7]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [1, 3, 7]})
result = pd.merge_asof(left, right, on='a')
assert_frame_equal(result, expected)
def test_examples2(self):
""" doc-string examples """
trades = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.038',
'20160525 13:30:00.048',
'20160525 13:30:00.048',
'20160525 13:30:00.048']),
'ticker': ['MSFT', 'MSFT',
'GOOG', 'GOOG', 'AAPL'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100]},
columns=['time', 'ticker', 'price', 'quantity'])
quotes = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.030',
'20160525 13:30:00.041',
'20160525 13:30:00.048',
'20160525 13:30:00.049',
'20160525 13:30:00.072',
'20160525 13:30:00.075']),
'ticker': ['GOOG', 'MSFT', 'MSFT',
'MSFT', 'GOOG', 'AAPL', 'GOOG',
'MSFT'],
'bid': [720.50, 51.95, 51.97, 51.99,
720.50, 97.99, 720.50, 52.01],
'ask': [720.93, 51.96, 51.98, 52.00,
720.93, 98.01, 720.88, 52.03]},
columns=['time', 'ticker', 'bid', 'ask'])
pd.merge_asof(trades, quotes,
on='time',
by='ticker')
pd.merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=pd.Timedelta('2ms'))
expected = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.038',
'20160525 13:30:00.048',
'20160525 13:30:00.048',
'20160525 13:30:00.048']),
'ticker': ['MSFT', 'MSFT', 'GOOG', 'GOOG', 'AAPL'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100],
'bid': [np.nan, 51.97, np.nan,
np.nan, np.nan],
'ask': [np.nan, 51.98, np.nan,
np.nan, np.nan]},
columns=['time', 'ticker', 'price', 'quantity',
'bid', 'ask'])
result = pd.merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=pd.Timedelta('10ms'),
allow_exact_matches=False)
assert_frame_equal(result, expected)
def test_examples3(self):
""" doc-string examples """
# GH14887
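        # direction='forward' matches each left 'a' with the first right row
        # whose 'a' is greater than or equal to it (no match -> NaN)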
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
'right_val': [1, 2, 3, 6, 7]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [1, 6, np.nan]})
result = pd.merge_asof(left, right, on='a', direction='forward')
assert_frame_equal(result, expected)
def test_examples4(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
'right_val': [1, 2, 3, 6, 7]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [1, 6, 7]})
result = pd.merge_asof(left, right, on='a', direction='nearest')
assert_frame_equal(result, expected)
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_basic_categorical(self):
expected = self.asof
trades = self.trades.copy()
trades.ticker = trades.ticker.astype('category')
quotes = self.quotes.copy()
quotes.ticker = quotes.ticker.astype('category')
expected.ticker = expected.ticker.astype('category')
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_basic_left_index(self):
# GH14253
expected = self.asof
trades = self.trades.set_index('time')
quotes = self.quotes
result = merge_asof(trades, quotes,
left_index=True,
right_on='time',
by='ticker')
# left-only index uses right's index, oddly
expected.index = result.index
# time column appears after left's columns
expected = expected[result.columns]
assert_frame_equal(result, expected)
def test_basic_right_index(self):
expected = self.asof
trades = self.trades
quotes = self.quotes.set_index('time')
result = merge_asof(trades, quotes,
left_on='time',
right_index=True,
by='ticker')
assert_frame_equal(result, expected)
def test_basic_left_index_right_index(self):
expected = self.asof.set_index('time')
trades = self.trades.set_index('time')
quotes = self.quotes.set_index('time')
result = merge_asof(trades, quotes,
left_index=True,
right_index=True,
by='ticker')
assert_frame_equal(result, expected)
def test_multi_index(self):
# MultiIndex is prohibited
trades = self.trades.set_index(['time', 'price'])
quotes = self.quotes.set_index('time')
with pytest.raises(MergeError):
merge_asof(trades, quotes,
left_index=True,
right_index=True)
trades = self.trades.set_index('time')
quotes = self.quotes.set_index(['time', 'bid'])
with pytest.raises(MergeError):
merge_asof(trades, quotes,
left_index=True,
right_index=True)
def test_on_and_index(self):
# 'on' parameter and index together is prohibited
trades = self.trades.set_index('time')
quotes = self.quotes.set_index('time')
with pytest.raises(MergeError):
merge_asof(trades, quotes,
left_on='price',
left_index=True,
right_index=True)
trades = self.trades.set_index('time')
quotes = self.quotes.set_index('time')
with pytest.raises(MergeError):
merge_asof(trades, quotes,
right_on='bid',
left_index=True,
right_index=True)
def test_basic_left_by_right_by(self):
# GH14253
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes,
on='time',
left_by='ticker',
right_by='ticker')
assert_frame_equal(result, expected)
def test_missing_right_by(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
q = quotes[quotes.ticker != 'MSFT']
result = merge_asof(trades, q,
on='time',
by='ticker')
expected.loc[expected.ticker == 'MSFT', ['bid', 'ask']] = np.nan
assert_frame_equal(result, expected)
def test_multiby(self):
# GH13936
trades = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.046',
'20160525 13:30:00.048',
'20160525 13:30:00.050']),
'ticker': ['MSFT', 'MSFT',
'GOOG', 'GOOG', 'AAPL'],
'exch': ['ARCA', 'NSDQ', 'NSDQ', 'BATS', 'NSDQ'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100]},
columns=['time', 'ticker', 'exch',
'price', 'quantity'])
quotes = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.030',
'20160525 13:30:00.041',
'20160525 13:30:00.045',
'20160525 13:30:00.049']),
'ticker': ['GOOG', 'MSFT', 'MSFT',
'MSFT', 'GOOG', 'AAPL'],
'exch': ['BATS', 'NSDQ', 'ARCA', 'ARCA',
'NSDQ', 'ARCA'],
'bid': [720.51, 51.95, 51.97, 51.99,
720.50, 97.99],
'ask': [720.92, 51.96, 51.98, 52.00,
720.93, 98.01]},
columns=['time', 'ticker', 'exch', 'bid', 'ask'])
expected = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.046',
'20160525 13:30:00.048',
'20160525 13:30:00.050']),
'ticker': ['MSFT', 'MSFT',
'GOOG', 'GOOG', 'AAPL'],
'exch': ['ARCA', 'NSDQ', 'NSDQ', 'BATS', 'NSDQ'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100],
'bid': [np.nan, 51.95, 720.50, 720.51, np.nan],
'ask': [np.nan, 51.96, 720.93, 720.92, np.nan]},
columns=['time', 'ticker', 'exch',
'price', 'quantity', 'bid', 'ask'])
result = pd.merge_asof(trades, quotes, on='time',
by=['ticker', 'exch'])
assert_frame_equal(result, expected)
def test_multiby_heterogeneous_types(self):
# GH13936
trades = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.046',
'20160525 13:30:00.048',
'20160525 13:30:00.050']),
'ticker': [0, 0, 1, 1, 2],
'exch': ['ARCA', 'NSDQ', 'NSDQ', 'BATS', 'NSDQ'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100]},
columns=['time', 'ticker', 'exch',
'price', 'quantity'])
quotes = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.030',
'20160525 13:30:00.041',
'20160525 13:30:00.045',
'20160525 13:30:00.049']),
'ticker': [1, 0, 0, 0, 1, 2],
'exch': ['BATS', 'NSDQ', 'ARCA', 'ARCA',
'NSDQ', 'ARCA'],
'bid': [720.51, 51.95, 51.97, 51.99,
720.50, 97.99],
'ask': [720.92, 51.96, 51.98, 52.00,
720.93, 98.01]},
columns=['time', 'ticker', 'exch', 'bid', 'ask'])
expected = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.046',
'20160525 13:30:00.048',
'20160525 13:30:00.050']),
'ticker': [0, 0, 1, 1, 2],
'exch': ['ARCA', 'NSDQ', 'NSDQ', 'BATS', 'NSDQ'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100],
'bid': [np.nan, 51.95, 720.50, 720.51, np.nan],
'ask': [np.nan, 51.96, 720.93, 720.92, np.nan]},
columns=['time', 'ticker', 'exch',
'price', 'quantity', 'bid', 'ask'])
result = pd.merge_asof(trades, quotes, on='time',
by=['ticker', 'exch'])
assert_frame_equal(result, expected)
def test_multiby_indexed(self):
# GH15676
left = pd.DataFrame([
[pd.to_datetime('20160602'), 1, 'a'],
[pd.to_datetime('20160602'), 2, 'a'],
[pd.to_datetime('20160603'), 1, 'b'],
[pd.to_datetime('20160603'), 2, 'b']],
columns=['time', 'k1', 'k2']).set_index('time')
right = pd.DataFrame([
[pd.to_datetime('20160502'), 1, 'a', 1.0],
[pd.to_datetime('20160502'), 2, 'a', 2.0],
[pd.to_datetime('20160503'), 1, 'b', 3.0],
[pd.to_datetime('20160503'), 2, 'b', 4.0]],
columns=['time', 'k1', 'k2', 'value']).set_index('time')
expected = pd.DataFrame([
[pd.to_datetime('20160602'), 1, 'a', 1.0],
[pd.to_datetime('20160602'), 2, 'a', 2.0],
[pd.to_datetime('20160603'), 1, 'b', 3.0],
[pd.to_datetime('20160603'), 2, 'b', 4.0]],
columns=['time', 'k1', 'k2', 'value']).set_index('time')
result = pd.merge_asof(left,
right,
left_index=True,
right_index=True,
by=['k1', 'k2'])
assert_frame_equal(expected, result)
with pytest.raises(MergeError):
pd.merge_asof(left, right, left_index=True, right_index=True,
left_by=['k1', 'k2'], right_by=['k1'])
def test_basic2(self, datapath):
expected = self.read_data(datapath, 'asof2.csv')
trades = self.read_data(datapath, 'trades2.csv')
quotes = self.read_data(datapath, 'quotes2.csv', dedupe=True)
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_basic_no_by(self):
f = lambda x: x[x.ticker == 'MSFT'].drop('ticker', axis=1) \
.reset_index(drop=True)
# just use a single ticker
expected = f(self.asof)
trades = f(self.trades)
quotes = f(self.quotes)
result = merge_asof(trades, quotes,
on='time')
assert_frame_equal(result, expected)
def test_valid_join_keys(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(trades, quotes,
left_on='time',
right_on='bid',
by='ticker')
with pytest.raises(MergeError):
merge_asof(trades, quotes,
on=['time', 'ticker'],
by='ticker')
with pytest.raises(MergeError):
merge_asof(trades, quotes,
by='ticker')
def test_with_duplicates(self, datapath):
q = pd.concat([self.quotes, self.quotes]).sort_values(
['time', 'ticker']).reset_index(drop=True)
result = merge_asof(self.trades, q,
on='time',
by='ticker')
expected = self.read_data(datapath, 'asof.csv')
assert_frame_equal(result, expected)
def test_with_duplicates_no_on(self):
df1 = pd.DataFrame({'key': [1, 1, 3],
'left_val': [1, 2, 3]})
df2 = pd.DataFrame({'key': [1, 2, 2],
'right_val': [1, 2, 3]})
result = merge_asof(df1, df2, on='key')
expected = pd.DataFrame({'key': [1, 1, 3],
'left_val': [1, 2, 3],
'right_val': [1, 1, 3]})
assert_frame_equal(result, expected)
def test_valid_allow_exact_matches(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(trades, quotes,
on='time',
by='ticker',
allow_exact_matches='foo')
def test_valid_tolerance(self):
trades = self.trades
quotes = self.quotes
# dti
merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=Timedelta('1s'))
# integer
merge_asof(trades.reset_index(), quotes.reset_index(),
on='index',
by='ticker',
tolerance=1)
# incompat
with pytest.raises(MergeError):
merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=1)
# invalid
with pytest.raises(MergeError):
merge_asof(trades.reset_index(), quotes.reset_index(),
on='index',
by='ticker',
tolerance=1.0)
# invalid negative
with pytest.raises(MergeError):
merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=-Timedelta('1s'))
with pytest.raises(MergeError):
merge_asof(trades.reset_index(), quotes.reset_index(),
on='index',
by='ticker',
tolerance=-1)
def test_non_sorted(self):
trades = self.trades.sort_values('time', ascending=False)
quotes = self.quotes.sort_values('time', ascending=False)
# we require that we are already sorted on time & quotes
assert not trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes,
on='time',
by='ticker')
trades = self.trades.sort_values('time')
assert trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes,
on='time',
by='ticker')
quotes = self.quotes.sort_values('time')
assert trades.time.is_monotonic
assert quotes.time.is_monotonic
# ok, though has dupes
merge_asof(trades, self.quotes,
on='time',
by='ticker')
def test_tolerance(self):
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=Timedelta('1day'))
expected = self.tolerance
assert_frame_equal(result, expected)
def test_tolerance_forward(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 7, 11],
'right_val': [1, 2, 3, 7, 11]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [1, np.nan, 11]})
result = pd.merge_asof(left, right, on='a', direction='forward',
tolerance=1)
assert_frame_equal(result, expected)
def test_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 7, 11],
'right_val': [1, 2, 3, 7, 11]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [1, np.nan, 11]})
result = pd.merge_asof(left, right, on='a', direction='nearest',
tolerance=1)
assert_frame_equal(result, expected)
def test_tolerance_tz(self):
# GH 14844
left = pd.DataFrame(
{'date': pd.date_range(start=pd.to_datetime('2016-01-02'),
freq='D', periods=5,
tz=pytz.timezone('UTC')),
'value1': np.arange(5)})
right = pd.DataFrame(
{'date': pd.date_range(start=pd.to_datetime('2016-01-01'),
freq='D', periods=5,
tz=pytz.timezone('UTC')),
'value2': list("ABCDE")})
result = pd.merge_asof(left, right, on='date',
tolerance=pd.Timedelta('1 day'))
expected = pd.DataFrame(
{'date': pd.date_range(start=pd.to_datetime('2016-01-02'),
freq='D', periods=5,
tz=pytz.timezone('UTC')),
'value1': np.arange(5),
'value2': list("BCDEE")})
assert_frame_equal(result, expected)
def test_tolerance_float(self):
# GH22981
left = pd.DataFrame({'a': [1.1, 3.5, 10.9],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1.0, 2.5, 3.3, 7.5, 11.5],
'right_val': [1.0, 2.5, 3.3, 7.5, 11.5]})
expected = pd.DataFrame({'a': [1.1, 3.5, 10.9],
'left_val': ['a', 'b', 'c'],
'right_val': [1, 3.3, np.nan]})
result = pd.merge_asof(left, right, on='a', direction='nearest',
tolerance=0.5)
assert_frame_equal(result, expected)
def test_index_tolerance(self):
# GH 15135
expected = self.tolerance.set_index('time')
trades = self.trades.set_index('time')
quotes = self.quotes.set_index('time')
result = pd.merge_asof(trades, quotes,
left_index=True,
right_index=True,
by='ticker',
tolerance=pd.Timedelta('1day'))
assert_frame_equal(result, expected)
def test_allow_exact_matches(self):
result = merge_asof(self.trades, self.quotes,
on='time',
by='ticker',
allow_exact_matches=False)
expected = self.allow_exact_matches
assert_frame_equal(result, expected)
def test_allow_exact_matches_forward(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 7, 11],
'right_val': [1, 2, 3, 7, 11]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [2, 7, 11]})
result = pd.merge_asof(left, right, on='a', direction='forward',
allow_exact_matches=False)
assert_frame_equal(result, expected)
def test_allow_exact_matches_nearest(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 7, 11],
'right_val': [1, 2, 3, 7, 11]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [2, 3, 11]})
result = pd.merge_asof(left, right, on='a', direction='nearest',
allow_exact_matches=False)
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance(self):
result = merge_asof(self.trades, self.quotes,
on='time',
by='ticker',
tolerance=Timedelta('100ms'),
allow_exact_matches=False)
expected = self.allow_exact_matches_and_tolerance
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance2(self):
# GH 13695
df1 = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030']),
'username': ['bob']})
df2 = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.000',
'2016-07-15 13:30:00.030']),
'version': [1, 2]})
result = pd.merge_asof(df1, df2, on='time')
expected = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030']),
'username': ['bob'],
'version': [2]})
assert_frame_equal(result, expected)
result = pd.merge_asof(df1, df2, on='time', allow_exact_matches=False)
expected = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030']),
'username': ['bob'],
'version': [1]})
assert_frame_equal(result, expected)
result = pd.merge_asof(df1, df2, on='time', allow_exact_matches=False,
tolerance=pd.Timedelta('10ms'))
expected = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030']),
'username': ['bob'],
'version': [np.nan]})
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance3(self):
# GH 13709
df1 = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030',
'2016-07-15 13:30:00.030']),
'username': ['bob', 'charlie']})
df2 = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.000',
'2016-07-15 13:30:00.030']),
'version': [1, 2]})
result = pd.merge_asof(df1, df2, on='time', allow_exact_matches=False,
tolerance=pd.Timedelta('10ms'))
expected = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030',
'2016-07-15 13:30:00.030']),
'username': ['bob', 'charlie'],
'version': [np.nan, np.nan]})
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_forward(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 3, 4, 6, 11],
'right_val': [1, 3, 4, 6, 11]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [np.nan, 6, 11]})
result = pd.merge_asof(left, right, on='a', direction='forward',
allow_exact_matches=False, tolerance=1)
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 3, 4, 6, 11],
'right_val': [1, 3, 4, 7, 11]})
expected = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c'],
'right_val': [np.nan, 4, 11]})
result = pd.merge_asof(left, right, on='a', direction='nearest',
allow_exact_matches=False, tolerance=1)
assert_frame_equal(result, expected)
def test_forward_by(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10, 12, 15],
'b': ['X', 'X', 'Y', 'Z', 'Y'],
'left_val': ['a', 'b', 'c', 'd', 'e']})
right = pd.DataFrame({'a': [1, 6, 11, 15, 16],
'b': ['X', 'Z', 'Y', 'Z', 'Y'],
'right_val': [1, 6, 11, 15, 16]})
expected = pd.DataFrame({'a': [1, 5, 10, 12, 15],
'b': ['X', 'X', 'Y', 'Z', 'Y'],
'left_val': ['a', 'b', 'c', 'd', 'e'],
'right_val': [1, np.nan, 11, 15, 16]})
result = pd.merge_asof(left, right, on='a', by='b',
direction='forward')
assert_frame_equal(result, expected)
def test_nearest_by(self):
# GH14887
left = pd.DataFrame({'a': [1, 5, 10, 12, 15],
'b': ['X', 'X', 'Z', 'Z', 'Y'],
'left_val': ['a', 'b', 'c', 'd', 'e']})
right = pd.DataFrame({'a': [1, 6, 11, 15, 16],
'b': ['X', 'Z', 'Z', 'Z', 'Y'],
'right_val': [1, 6, 11, 15, 16]})
expected = pd.DataFrame({'a': [1, 5, 10, 12, 15],
'b': ['X', 'X', 'Z', 'Z', 'Y'],
'left_val': ['a', 'b', 'c', 'd', 'e'],
'right_val': [1, 1, 11, 11, 16]})
result = pd.merge_asof(left, right, on='a', by='b',
direction='nearest')
assert_frame_equal(result, expected)
def test_by_int(self):
# we specialize by type, so test that this is correct
df1 = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.020',
'20160525 13:30:00.030',
'20160525 13:30:00.040',
'20160525 13:30:00.050',
'20160525 13:30:00.060']),
'key': [1, 2, 1, 3, 2],
'value1': [1.1, 1.2, 1.3, 1.4, 1.5]},
columns=['time', 'key', 'value1'])
df2 = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.015',
'20160525 13:30:00.020',
'20160525 13:30:00.025',
'20160525 13:30:00.035',
'20160525 13:30:00.040',
'20160525 13:30:00.055',
'20160525 13:30:00.060',
'20160525 13:30:00.065']),
'key': [2, 1, 1, 3, 2, 1, 2, 3],
'value2': [2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8]},
columns=['time', 'key', 'value2'])
result = pd.merge_asof(df1, df2, on='time', by='key')
expected = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.020',
'20160525 13:30:00.030',
'20160525 13:30:00.040',
'20160525 13:30:00.050',
'20160525 13:30:00.060']),
'key': [1, 2, 1, 3, 2],
'value1': [1.1, 1.2, 1.3, 1.4, 1.5],
'value2': [2.2, 2.1, 2.3, 2.4, 2.7]},
columns=['time', 'key', 'value1', 'value2'])
assert_frame_equal(result, expected)
def test_on_float(self):
# mimics how to determine the minimum-price variation
df1 = pd.DataFrame({
'price': [5.01, 0.0023, 25.13, 340.05, 30.78, 1040.90, 0.0078],
'symbol': list("ABCDEFG")},
columns=['symbol', 'price'])
df2 = pd.DataFrame({
'price': [0.0, 1.0, 100.0],
'mpv': [0.0001, 0.01, 0.05]},
columns=['price', 'mpv'])
df1 = df1.sort_values('price').reset_index(drop=True)
result = pd.merge_asof(df1, df2, on='price')
expected = pd.DataFrame({
'symbol': list("BGACEDF"),
'price': [0.0023, 0.0078, 5.01, 25.13, 30.78, 340.05, 1040.90],
'mpv': [0.0001, 0.0001, 0.01, 0.01, 0.01, 0.05, 0.05]},
columns=['symbol', 'price', 'mpv'])
assert_frame_equal(result, expected)
def test_on_specialized_type(self, any_real_dtype):
# see gh-13936
dtype = np.dtype(any_real_dtype).type
df1 = pd.DataFrame({
"value": [5, 2, 25, 100, 78, 120, 79],
"symbol": list("ABCDEFG")},
columns=["symbol", "value"])
df1.value = dtype(df1.value)
df2 = pd.DataFrame({
"value": [0, 80, 120, 125],
"result": list("xyzw")},
columns=["value", "result"])
df2.value = dtype(df2.value)
df1 = df1.sort_values("value").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="value")
expected = pd.DataFrame(
{"symbol": list("BACEGDF"),
"value": [2, 5, 25, 78, 79, 100, 120],
"result": list("xxxxxyz")
}, columns=["symbol", "value", "result"])
expected.value = dtype(expected.value)
assert_frame_equal(result, expected)
def test_on_specialized_type_by_int(self, any_real_dtype):
# see gh-13936
dtype = np.dtype(any_real_dtype).type
df1 = pd.DataFrame({
"value": [5, 2, 25, 100, 78, 120, 79],
"key": [1, 2, 3, 2, 3, 1, 2],
"symbol": list("ABCDEFG")},
columns=["symbol", "key", "value"])
df1.value = dtype(df1.value)
df2 = pd.DataFrame({
"value": [0, 80, 120, 125],
"key": [1, 2, 2, 3],
"result": list("xyzw")},
columns=["value", "key", "result"])
df2.value = dtype(df2.value)
df1 = df1.sort_values("value").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="value", by="key")
expected = pd.DataFrame({
"symbol": list("BACEGDF"),
"key": [2, 1, 3, 3, 2, 2, 1],
"value": [2, 5, 25, 78, 79, 100, 120],
"result": [np.nan, "x", np.nan, np.nan, np.nan, "y", "x"]},
columns=["symbol", "key", "value", "result"])
expected.value = dtype(expected.value)
assert_frame_equal(result, expected)
def test_on_float_by_int(self):
# type specialize both "by" and "on" parameters
df1 = pd.DataFrame({
'symbol': list("AAABBBCCC"),
'exch': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'price': [3.26, 3.2599, 3.2598, 12.58, 12.59,
12.5, 378.15, 378.2, 378.25]},
columns=['symbol', 'exch', 'price'])
df2 = pd.DataFrame({
'exch': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'price': [0.0, 1.0, 100.0, 0.0, 5.0, 100.0, 0.0, 5.0, 1000.0],
'mpv': [0.0001, 0.01, 0.05, 0.0001, 0.01, 0.1, 0.0001, 0.25, 1.0]},
columns=['exch', 'price', 'mpv'])
df1 = df1.sort_values('price').reset_index(drop=True)
df2 = df2.sort_values('price').reset_index(drop=True)
result = pd.merge_asof(df1, df2, on='price', by='exch')
expected = pd.DataFrame({
'symbol': list("AAABBBCCC"),
'exch': [3, 2, 1, 3, 1, 2, 1, 2, 3],
'price': [3.2598, 3.2599, 3.26, 12.5, 12.58,
12.59, 378.15, 378.2, 378.25],
'mpv': [0.0001, 0.0001, 0.01, 0.25, 0.01, 0.01, 0.05, 0.1, 0.25]},
columns=['symbol', 'exch', 'price', 'mpv'])
assert_frame_equal(result, expected)
def test_merge_datatype_error(self):
""" Tests merge datatype mismatch error """
msg = r'merge keys \[0\] object and int64, must be the same type'
left = pd.DataFrame({'left_val': [1, 5, 10],
'a': ['a', 'b', 'c']})
right = pd.DataFrame({'right_val': [1, 2, 3, 6, 7],
'a': [1, 2, 3, 6, 7]})
with pytest.raises(MergeError, match=msg):
merge_asof(left, right, on='a')
@pytest.mark.parametrize('func', [lambda x: x, lambda x: to_datetime(x)],
ids=['numeric', 'datetime'])
@pytest.mark.parametrize('side', ['left', 'right'])
def test_merge_on_nans(self, func, side):
# GH 23189
msg = "Merge keys contain null values on {} side".format(side)
nulls = func([1.0, 5.0, np.nan])
non_nulls = func([1.0, 5.0, 10.])
df_null = pd.DataFrame({'a': nulls, 'left_val': ['a', 'b', 'c']})
df = pd.DataFrame({'a': non_nulls, 'right_val': [1, 6, 11]})
with pytest.raises(ValueError, match=msg):
if side == 'left':
merge_asof(df_null, df, on='a')
else:
merge_asof(df, df_null, on='a')
def test_merge_by_col_tz_aware(self):
# GH 21184
left = pd.DataFrame(
{'by_col': pd.DatetimeIndex(['2018-01-01']).tz_localize('UTC'),
'on_col': [2], 'values': ['a']})
right = pd.DataFrame(
{'by_col': pd.DatetimeIndex(['2018-01-01']).tz_localize('UTC'),
'on_col': [1], 'values': ['b']})
result = pd.merge_asof(left, right, by='by_col', on='on_col')
expected = pd.DataFrame([
[pd.Timestamp('2018-01-01', tz='UTC'), 2, 'a', 'b']
], columns=['by_col', 'on_col', 'values_x', 'values_y'])
assert_frame_equal(result, expected)
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/tests/test_join.py | 4 | 6923 | # -*- coding: utf-8 -*-
import numpy as np
from pandas import Index
from pandas._libs import join as _join
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
class TestIndexer(object):
def test_outer_join_indexer(self):
typemap = [('int32', _join.outer_join_indexer_int32),
('int64', _join.outer_join_indexer_int64),
('float32', _join.outer_join_indexer_float32),
('float64', _join.outer_join_indexer_float64),
('object', _join.outer_join_indexer_object)]
for dtype, indexer in typemap:
left = np.arange(3, dtype=dtype)
right = np.arange(2, 5, dtype=dtype)
empty = np.array([], dtype=dtype)
result, lindexer, rindexer = indexer(left, right)
assert isinstance(result, np.ndarray)
assert isinstance(lindexer, np.ndarray)
assert isinstance(rindexer, np.ndarray)
tm.assert_numpy_array_equal(result, np.arange(5, dtype=dtype))
exp = np.array([0, 1, 2, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([-1, -1, 0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
result, lindexer, rindexer = indexer(empty, right)
tm.assert_numpy_array_equal(result, right)
exp = np.array([-1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
result, lindexer, rindexer = indexer(left, empty)
tm.assert_numpy_array_equal(result, left)
exp = np.array([0, 1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(lindexer, exp)
exp = np.array([-1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(rindexer, exp)
def test_left_join_indexer_unique():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([2, 2, 3, 4, 4], dtype=np.int64)
result = _join.left_join_indexer_unique_int64(b, a)
expected = np.array([1, 1, 2, 3, 3], dtype=np.int64)
assert (np.array_equal(result, expected))
def test_left_outer_join_bug():
left = np.array([0, 1, 0, 1, 1, 2, 3, 1, 0, 2, 1, 2, 0, 1, 1, 2, 3, 2, 3,
2, 1, 1, 3, 0, 3, 2, 3, 0, 0, 2, 3, 2, 0, 3, 1, 3, 0, 1,
3, 0, 0, 1, 0, 3, 1, 0, 1, 0, 1, 1, 0, 2, 2, 2, 2, 2, 0,
3, 1, 2, 0, 0, 3, 1, 3, 2, 2, 0, 1, 3, 0, 2, 3, 2, 3, 3,
2, 3, 3, 1, 3, 2, 0, 0, 3, 1, 1, 1, 0, 2, 3, 3, 1, 2, 0,
3, 1, 2, 0, 2], dtype=np.int64)
right = np.array([3, 1], dtype=np.int64)
max_groups = 4
lidx, ridx = _join.left_outer_join(left, right, max_groups, sort=False)
exp_lidx = np.arange(len(left))
exp_ridx = -np.ones(len(left))
exp_ridx[left == 1] = 1
exp_ridx[left == 3] = 0
assert (np.array_equal(lidx, exp_lidx))
assert (np.array_equal(ridx, exp_ridx))
def test_inner_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = _join.inner_join_indexer_int64(a, b)
index_exp = np.array([3, 5], dtype=np.int64)
assert_almost_equal(index, index_exp)
aexp = np.array([2, 4], dtype=np.int64)
bexp = np.array([1, 2], dtype=np.int64)
assert_almost_equal(ares, aexp)
assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = _join.inner_join_indexer_int64(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_outer_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = _join.outer_join_indexer_int64(a, b)
index_exp = np.array([0, 1, 2, 3, 4, 5, 7, 9], dtype=np.int64)
assert_almost_equal(index, index_exp)
aexp = np.array([-1, 0, 1, 2, 3, 4, -1, -1], dtype=np.int64)
bexp = np.array([0, -1, -1, 1, -1, 2, 3, 4], dtype=np.int64)
assert_almost_equal(ares, aexp)
assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = _join.outer_join_indexer_int64(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_left_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = _join.left_join_indexer_int64(a, b)
assert_almost_equal(index, a)
aexp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
bexp = np.array([-1, -1, 1, -1, 2], dtype=np.int64)
assert_almost_equal(ares, aexp)
assert_almost_equal(bres, bexp)
a = np.array([5], dtype=np.int64)
b = np.array([5], dtype=np.int64)
index, ares, bres = _join.left_join_indexer_int64(a, b)
tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.int64))
tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.int64))
def test_left_join_indexer2():
idx = Index([1, 1, 2, 5])
idx2 = Index([1, 2, 5, 7, 9])
res, lidx, ridx = _join.left_join_indexer_int64(idx2.values, idx.values)
exp_res = np.array([1, 1, 2, 5, 7, 9], dtype=np.int64)
assert_almost_equal(res, exp_res)
exp_lidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.int64)
assert_almost_equal(lidx, exp_lidx)
exp_ridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.int64)
assert_almost_equal(ridx, exp_ridx)
def test_outer_join_indexer2():
idx = Index([1, 1, 2, 5])
idx2 = Index([1, 2, 5, 7, 9])
res, lidx, ridx = _join.outer_join_indexer_int64(idx2.values, idx.values)
exp_res = np.array([1, 1, 2, 5, 7, 9], dtype=np.int64)
assert_almost_equal(res, exp_res)
exp_lidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.int64)
assert_almost_equal(lidx, exp_lidx)
exp_ridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.int64)
assert_almost_equal(ridx, exp_ridx)
def test_inner_join_indexer2():
idx = Index([1, 1, 2, 5])
idx2 = Index([1, 2, 5, 7, 9])
res, lidx, ridx = _join.inner_join_indexer_int64(idx2.values, idx.values)
exp_res = np.array([1, 1, 2, 5], dtype=np.int64)
assert_almost_equal(res, exp_res)
exp_lidx = np.array([0, 0, 1, 2], dtype=np.int64)
assert_almost_equal(lidx, exp_lidx)
exp_ridx = np.array([0, 1, 2, 3], dtype=np.int64)
assert_almost_equal(ridx, exp_ridx)
| mit |
gciteam6/xgboost | src/models/train_predict_stacking.py | 1 | 5640 | # Built-in modules
from os import path, pardir
import sys
import logging
import re
# not used in this stub but often useful for finding various files
PROJECT_ROOT_DIRPATH = path.join(path.dirname(__file__), pardir, pardir)
sys.path.append(PROJECT_ROOT_DIRPATH)
# Third-party modules
import click
from dotenv import find_dotenv, load_dotenv
import numpy as np
import pandas as pd
from sklearn.cross_decomposition import PLSRegression
from sklearn.model_selection import ParameterGrid
# Hand-made modules
from src.models.blending import MyBlender
from src.models.stacking import MyStacker
BLEND_MODEL_INSTANCE = PLSRegression()
BLEND_MODEL_BASENAME = "layer1.PLSRegression"
BLEND_MODEL_SEARCHING_PARAMS = {"n_components": np.arange(1, 4)}
LOCATIONS = (
"ukishima",
"ougishima",
"yonekurayama"
)
KWARGS_TO_CSV = {
"sep": "\t"
}
def gen_params_grid():
return ParameterGrid(BLEND_MODEL_SEARCHING_PARAMS)
def gen_param_string(param_dict):
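    # Illustration: {"n_components": 2} -> "n_components_2"; multiple entries are joined as
    # "key1_value1.key2_value2" (the trailing "." is stripped on return).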
param_str = str()
for (key, value) in param_dict.items():
param_str += "{k}_{v}.".format(k=key, v=value)
return param_str[:-1]
def gen_blender_list(predict_target, location):
blend_model_param_list = gen_params_grid()
blend_model_name_list = [
BLEND_MODEL_BASENAME + \
".{p}.{t}.{l}".format(p=gen_param_string(blend_model_param), t=predict_target, l=location) \
for blend_model_param in blend_model_param_list
]
blender_list = [
MyBlender(BLEND_MODEL_INSTANCE, blend_model_name, blend_model_param) \
for blend_model_name, blend_model_param in zip(blend_model_name_list, blend_model_param_list)
]
return blender_list
def remove_predict_target_and_location_suffix(target_string, predict_target):
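    # Illustration: for a model name like
    # "layer1.PLSRegression.n_components_2.test.ukishima" with predict_target "test",
    # this returns "layer1.PLSRegression.n_components_2" (everything before the dot that
    # precedes the predict_target suffix).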
matcher = re.search(predict_target, target_string)
return target_string[:matcher.start()-1]
def run_blending(predict_target, location, blender, stacker):
logger = logging.getLogger(__name__)
logger.info('#0: train models')
# retrieve train y
y_true_as_train = pd.read_csv(stacker.gen_y_true_filepath(location),
**stacker.KWARGS_READ_CSV)
y_true_as_train.dropna(axis=0, inplace=True)
logger.info('#1: get y_true as a train data @ {l} !'.format(l=location))
# retrieve train X
df_pred_as_train = stacker.X_train_.loc[y_true_as_train.index, :].copy(deep=True)
logger.info('#1: get y_pred as a train data @ {l} !'.format(l=location))
#
# bifurcation
#
if predict_target == "crossval":
# try cross-validation
pd.DataFrame(
blender.cross_val_predict(df_pred_as_train.as_matrix(), y_true_as_train.as_matrix()),
index=df_pred_as_train.index,
columns=[remove_predict_target_and_location_suffix(blender.model_name, predict_target), ]
).to_csv(
blender.gen_abspath(blender.gen_serialize_filepath("predict", "tsv")),
**KWARGS_TO_CSV
)
logger.info('#2: estimate y_pred of train samples like cross-validation @ {l} !'.format(l=location))
elif predict_target == "test":
# fit model with the whole samples
blender.fit(df_pred_as_train.as_matrix(), y_true_as_train.as_matrix())
logger.info('#2: fit & serialized a model @ {l} !'.format(l=location))
# retrieve test X
df_pred_as_test = stacker.get_concatenated_xgb_predict(predict_target, location)
df_pred_as_test.to_csv(
            path.join(stacker.PROCESSED_DATA_BASEPATH,
"dataset.predict_y.layer_0.{t}.{l}.tsv".format(t=predict_target, l=location)),
**KWARGS_TO_CSV
)
logger.info('#3: get y_pred as a test data @ {l} !'.format(l=location))
# predict
pd.DataFrame(
blender.predict(df_pred_as_test[df_pred_as_train.columns].as_matrix()),
index=df_pred_as_test.index,
columns=[remove_predict_target_and_location_suffix(blender.model_name, predict_target), ]
).to_csv(
blender.gen_abspath(
blender.gen_serialize_filepath("predict", "tsv")),
**KWARGS_TO_CSV
)
logger.info('#4: estimate & save y_pred of test samples @ {l} !'.format(l=location))
@click.command()
@click.option("-t", "predict_target", flag_value="test", default=True)
@click.option("-v", "predict_target", flag_value="crossval")
@click.option("--location", "-l", type=str, default=None)
def main(predict_target, location):
if location is None:
location_list = LOCATIONS
else:
location_list = [location, ]
for place in location_list:
# get blender and stacker
blender_list = gen_blender_list(predict_target, place)
stacker = MyStacker()
        # attach train X to the stacker
stacker.X_train_ = stacker.get_concatenated_xgb_predict("crossval", place)
stacker.X_train_.to_csv(
path.join(stacker.PROCESSED_DATA_BASEPATH,
"dataset.predict_y.layer_0.crossval.{l}.tsv".format(l=place)),
**KWARGS_TO_CSV
)
stacker.X_train_ = stacker.X_train_.loc[:, ~stacker.X_train_.isnull().any()]
for blender in blender_list:
run_blending(predict_target, place, blender, stacker)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
| mit |
wwf5067/statsmodels | examples/python/regression_diagnostics.py | 28 | 2876 |
## Regression diagnostics
# This example file shows how to use a few of the ``statsmodels`` regression diagnostic tests in a real-life context. You can learn about more tests and find more information about them on the [Regression Diagnostics page.](http://statsmodels.sourceforge.net/stable/diagnostic.html)
#
# Note that most of the tests described here only return a tuple of numbers, without any annotation. A full description of outputs is always included in the docstring and in the online ``statsmodels`` documentation. For presentation purposes, we use the ``lzip(name, test)`` construct to pretty-print short descriptions in the examples below.
# ## Estimate a regression model
from __future__ import print_function
from statsmodels.compat import lzip
import statsmodels
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
import statsmodels.stats.api as sms
# Load data
url = 'http://vincentarelbundock.github.io/Rdatasets/csv/HistData/Guerry.csv'
dat = pd.read_csv(url)
# Fit regression model (using the natural log of one of the regressors)
results = smf.ols('Lottery ~ Literacy + np.log(Pop1831)', data=dat).fit()
# Inspect the results
print(results.summary())
# ## Normality of the residuals
# Jarque-Bera test:
name = ['Jarque-Bera', 'Chi^2 two-tail prob.', 'Skew', 'Kurtosis']
test = sms.jarque_bera(results.resid)
lzip(name, test)
# Omnibus normality test:
name = ['Chi^2', 'Two-tail probability']
test = sms.omni_normtest(results.resid)
lzip(name, test)
# ## Influence tests
#
# Once created, an object of class ``OLSInfluence`` holds attributes and methods that allow users to assess the influence of each observation. For example, we can compute and extract the first few rows of DFbetas by:
from statsmodels.stats.outliers_influence import OLSInfluence
test_class = OLSInfluence(results)
test_class.dfbetas[:5,:]
# Explore other options by typing ``dir(influence_test)``
#
# Useful information on leverage can also be plotted:
from statsmodels.graphics.regressionplots import plot_leverage_resid2
print(plot_leverage_resid2(results))
# Other plotting options can be found on the [Graphics page.](http://statsmodels.sourceforge.net/stable/graphics.html)
# ## Multicollinearity
#
# Condition number:
np.linalg.cond(results.model.exog)
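# As a rule of thumb, condition numbers above roughly 30 are commonly taken to signal
# problematic multicollinearity among the regressors.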
# ## Heteroskedasticity tests
#
# Breusch-Pagan test:
name = ['Lagrange multiplier statistic', 'p-value',
'f-value', 'f p-value']
test = sms.het_breushpagan(results.resid, results.model.exog)
lzip(name, test)
# Goldfeld-Quandt test
name = ['F statistic', 'p-value']
test = sms.het_goldfeldquandt(results.resid, results.model.exog)
lzip(name, test)
# ## Linearity
#
# Harvey-Collier multiplier test for the null hypothesis that the linear specification is correct:
name = ['t value', 'p value']
test = sms.linear_harvey_collier(results)
lzip(name, test)
| bsd-3-clause |
ajrichards/notebook | pymc3/multilevel-radon.py | 2 | 3816 | #!/usr/bin/env python
"""
multi-level example with GLM
Gelman et al.'s (2007) radon dataset is a classic for hierarchical modeling.
Radon gas is known to be the leading cause of lung cancer among non-smokers.
Here we'll investigate county-level differences and try to predict radon
levels in different counties based on the county itself and the presence
of a basement.
radon_ic = b0 + b1*floor_ic + epsilon
The radon level of measurement (i) in county (c) is a linear function of
whether the measurement was taken in a basement or on the ground floor.
There are 85 counties in MN
pooling - just run a regression on all data and assess
"""
import os
import pickle as cPickle  # used below under the cPickle name; plain pickle works on Python 2 and 3
import matplotlib.pyplot as plt
import numpy as np
import pymc3 as pm
import pandas as pd
data = pd.read_csv('radon.csv')
county_names = data.county.unique()
county_idx = data.county_code.values
n_counties = len(data.county.unique())
print("total counties: %s"%(n_counties))
print(data[['county', 'log_radon', 'floor']].head())
## unpooled
############################################################################
run_trace = False
# Sampling settings (assumed values; the original script referenced niter, step and start
# without defining them anywhere):
niter = 2000  # number of posterior draws
step = None   # let PyMC3 choose the step method automatically
start = None  # let PyMC3 choose the starting values
with pm.Model() as unpooled_model:
# Independent parameters for each county
a = pm.Normal('a', 0, sd=100, shape=n_counties)
b = pm.Normal('b', 0, sd=100, shape=n_counties)
# Model error
eps = pm.HalfCauchy('eps', 5)
# Model prediction of radon level
# a[county_idx] translates to a[0, 0, 0, 1, 1, ...],
# we thus link multiple household measures of a county
# to its coefficients.
radon_est = a[county_idx] + b[county_idx]*data.floor.values
# Data likelihood
y = pm.Normal('y', radon_est, sd=eps, observed=data.log_radon)
with unpooled_model:
trace_pickle = "traces/unpooled_radon.pkl"
if run_trace or not os.path.exists(trace_pickle):
        tmp = open(trace_pickle, 'wb')
        unpooled_trace = pm.sample(niter, step, start, random_seed=123, progressbar=True)
        cPickle.dump(unpooled_trace, tmp)
tmp.close()
else:
print("...loading saved trace")
        tmp = open(trace_pickle, 'rb')
unpooled_trace = cPickle.load(tmp)
############################################################################
run_trace = False
with pm.Model() as hierarchical_model:
# Hyperpriors for group nodes
mu_a = pm.Normal('mu_a', mu=0., sd=100**2)
sigma_a = pm.HalfCauchy('sigma_a', 5)
mu_b = pm.Normal('mu_b', mu=0., sd=100**2)
sigma_b = pm.HalfCauchy('sigma_b', 5)
# Intercept for each county, distributed around group mean mu_a
# Above we just set mu and sd to a fixed value while here we
# plug in a common group distribution for all a and b (which are
# vectors of length n_counties).
a = pm.Normal('a', mu=mu_a, sd=sigma_a, shape=n_counties)
    # Slope for each county, distributed around group mean mu_b
b = pm.Normal('b', mu=mu_b, sd=sigma_b, shape=n_counties)
# Model error
eps = pm.HalfCauchy('eps', 5)
radon_est = a[county_idx] + b[county_idx] * data.floor.values
# Data likelihood
radon_like = pm.Normal('radon_like', mu=radon_est, sd=eps, observed=data.log_radon)
# Inference button (TM)!
with hierarchical_model:
trace_pickle = "traces/hierarchical_radon.pkl"
if run_trace or not os.path.exists(trace_pickle):
        tmp = open(trace_pickle, 'wb')
        hierarchical_trace = pm.sample(niter, step, start, random_seed=123, progressbar=True)
        cPickle.dump(hierarchical_trace, tmp)
tmp.close()
else:
print("...loading saved trace")
        tmp = open(trace_pickle, 'rb')
hierarchical_trace = cPickle.load(tmp)
# Plotting the hierarchical model trace (its sampled values) from iteration 1000 onwards (right side plot)
# and its accumulated marginal values (left side plot)
pm.traceplot(hierarchical_trace[1000:]);
plt.show()
| bsd-3-clause |
JulianPasc/Final-Project-Julien-Pascal | src/Calculate_Moments.py | 1 | 41436 | ###############################################
# Calculate the turnover moments from BLS Data
##############################################
"""
Julien Pascal
Script that calculates the moments to match
and that draws a lot of graphs using mainly data
from the BLS
"""
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
from more_itertools import unique_everseen
import pylab
import matplotlib.dates as mdates
from math import exp, log
import math
from datetime import date, timedelta as td
import statsmodels.api as sm
import scipy.stats
from tabulate import tabulate
import csv
import statsmodels.formula.api as smm #OLS
from arch import arch_model
#################################
# PATHS
# change "path_main" if necessary
path = '/home/julien/Final-Project-Julien-Pascal/data' #path to the data
path_figure = '/home/julien/Final-Project-Julien-Pascal/figures/' #where to save the figures
path_table = '/home/julien/Final-Project-Julien-Pascal/tables/' #where to save the tables
os.chdir(path) #locate in the folder with the data
def estimated_autocorrelation(x):
"""
http://stackoverflow.com/q/14297012/190597
http://en.wikipedia.org/wiki/Autocorrelation#Estimation
"""
n = len(x)
variance = x.var()
x = x-x.mean()
r = np.correlate(x, x, mode = 'full')[-n:]
assert np.allclose(r, np.array([(x[:n-k]*x[-(n-k):]).sum() for k in range(n)]))
result = r/(variance*(np.arange(n, 0, -1)))
return result
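# Note: estimated_autocorrelation returns the full autocorrelation function; only the lag-1
# value (index 1) is used in the moment table at the end of this script.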
#################################################
# Select what date to begin with and when to stop
starting_year = 1951 #first quarter 1951. starting point for the HWI
starting_month = 1
starting_date = date(starting_year, starting_month, 1)
stop_year = 2015 #third quarter 2015
stop_month = 7
stop_date = date(stop_year, stop_month, 1)
df = pd.read_csv("LNS.csv") # Date on employment and unemployment
# 1. Create a column with dates objects:
df['Date'] = 0
for i in range(0,len(df['Series ID'])):
df.ix[i,'Date'] = date(df.ix[i,'Year'], df.ix[i,'Period'], 1)
#keep the selected time period:
df = df.loc[df['Date'] <= stop_date]
df = df.loc[df['Date'] >= starting_date]
# 2. Create a vector with dates:
Year = df.loc[df['Series ID'] == 'LNS12000000','Year'].values
Month = df.loc[df['Series ID'] == 'LNS12000000','Period'].values
#Remark: indexing start at 0
dateList = []
for i in range(0,len(Year)):
dateList.append(date(Year[i], Month[i], 1))
#############################
# EMPLOYMENT AND UNEMPLOYMENT
#############################
#Employed: LNS12000000
Employed = df.loc[df['Series ID'] == 'LNS12000000','Value']
#Unemployed: LNS13000000
Unemployed = df.loc[df['Series ID'] == 'LNS13000000','Value']
#Unemployed for less than 5 weeks : LNS13008396
U_5 = df.loc[df['Series ID'] == 'LNS13008396','Value']
#Unemployed for 5-14 Weeks: LNS13008756
U_15 = df.loc[df['Series ID'] == 'LNS13008756','Value']
#Unemployed for 27 Weeks and over: LNS13008636
U_27 = df.loc[df['Series ID'] == 'LNS13008636','Value']
# Unemployment rate:
Unemployment_rate =[]
Unemployment_rate =(Unemployed.values/(Unemployed.values+Employed.values))*100
# Plot Employed, Unemployed: :
fig, ax1 = plt.subplots()
ax1.plot(dateList, Employed, color='r')
ax1.plot(dateList, Unemployed, color='g')
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('Employed and unemployed in thousands', color='k')
ax2 = ax1.twinx()
#ax2.plot(dateList, Unemployment_rate, color='b')
#ax2.plot(dateList, Participation_rate, color='K', ls ='--')
ax2.set_ylabel('Unemployment rate', color='k' )
ax1.legend(['Employed', 'Unemployed'], loc='best', fancybox = True, framealpha=0.5)
ax2.fill_between(dateList, 0, Unemployment_rate, color='b', alpha=0.2)
ax2.legend(['Unemployment rate'], loc='best', fancybox = True, framealpha=0.5)
plt.savefig(path_figure + 'Unemployment_1948_2016')
plt.show()
#############################################
# Labor Force Participation Rate Statistics:
############################################
# A. Participation rates:
Participation_rate = df.loc[df['Series ID'] == 'LNS11300000','Value'] #LNS11300000: all sex, 16 and older
Participation_men = df.loc[df['Series ID'] == 'LNS11300001','Value'] #LNS11300001: men, 16 and older
Participation_women = df.loc[df['Series ID'] == 'LNS11300002','Value'] #LNS11300001: women, 16 and older
Participation_16_19 = df.loc[df['Series ID'] == 'LNS11300012','Value'] #'LNS11300012: all sex, between 16 and 19
#B. Monthly recession data: 1 = month of recession
#Source: https://research.stlouisfed.org/fred2/series/USREC
recession_monthly = pd.read_csv("quarterly_recession_indicator.csv") #1 = recession
# keep only the dates inside the good interval:
# Create a column with dates objects:
recession_monthly['Date'] = 0
for i in range(0,len(recession_monthly['observation_date'])):
recession_monthly.ix[i,'Date'] = date(recession_monthly.ix[i,'Year'], recession_monthly.ix[i,'Month'], 1)
#keep the selected time period:
recession_monthly = recession_monthly.loc[recession_monthly['Date'] <= stop_date]
recession_monthly = recession_monthly.loc[recession_monthly['Date'] >= starting_date]
#keep only the dates for which recession = 1:
recession_monthly = recession_monthly.loc[recession_monthly['USRECQ'] == 1]
#create a vector of recession dates:
recession_vector_monthly = recession_monthly['Date'].values
# C. Data on discouraged workers from the OECD:
df_OECD = pd.read_csv('Incidence_of_discouraged_workers_OECD.csv')
# Keep series for women and men:
df_OECD = df_OECD.loc[df_OECD['SEX'] == 'MW',]
# Keep share of the population:
df_OECD = df_OECD.loc[df_OECD['Series'] == 'Share of population',]
Year_OECD = df_OECD['Time'].values
# Create a column with dates objects:
#df_OECD['Date'] = 0
dateListOECD = []
for i in range(0,len(Year_OECD)):
#df_OECD.ix[i,'Date'] = date(Year_OECD[i], 1, 1)
dateListOECD.append(date(Year_OECD[i], 1, 1))
#store the starting date of the discouraged workers series:
start_date_OECD = dateListOECD[0]
end_date_OECD = dateListOECD[len(Year_OECD)-1]
#D. Data on discouraged workers from the CPS:
# Stats on 1994
# LNU05026645: Monthly data; number in thousands
number_discouraged = df.loc[(df['Series ID'] == 'LNU05026645'),'Value']
start_date_discouraged = date(1994, 1, 1)
Employed_since_1994 = df.loc[(df['Series ID'] == 'LNS12000000') & (df['Date'] >= start_date_discouraged),'Value']
Unemployed_since_1994 = df.loc[(df['Series ID'] == 'LNS13000000') & (df['Date'] >= start_date_discouraged),'Value']
share_discouraged_workers = (number_discouraged.values/(Unemployed_since_1994.values+Employed_since_1994.values))*100
#################################################
# Plot Participation rate and discourage workers :
#################################################
fig, ax1 = plt.subplots()
ax1.plot(dateList, Participation_rate, color='b')
ax1.plot(dateList, Participation_men, color='k')
ax1.plot(dateList, Participation_women, color='r')
ax1.plot(dateList, Participation_16_19, color='g')
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('Participation rate', color='k')
#add recession vertical lines:
for i in range(0,len(recession_vector_monthly)):
ax1.axvline(recession_vector_monthly[i], color='silver', linewidth = 2, zorder=0)
#ax2 = ax1.twinx()
##ax2.plot(dateListOECD, df_OECD['Value'], color='teal')
#ax2.set_ylabel('', color='teal' )
#ax2.fill_between(dateListOECD, 0, df_OECD['Value'], color='teal', alpha=0.3)
ax1.legend(['All persons','Men','Women','All persons between 16 and 19'], loc='lower center' , fancybox = True, framealpha=0.5)
#ax2.legend(['Share of discourage workers in the population'], loc='best' , fancybox = True, framealpha=0.5)
plt.savefig(path_figure +'Participation_rates')
plt.show()
########################################################
# Create plot showing the influence of discourage wokers
# The series on discourage workers starts only in 1994:
# Shorten the other series in consequence:
#######################################################
Participation_rate_short = df.loc[(df['Series ID'] == 'LNS11300000')&(df['Date'] >= start_date_discouraged ),'Value'] #LNS11300000: all sex, 16 and older
Participation_men_short = df.loc[(df['Series ID'] == 'LNS11300001') & (df['Date'] >= start_date_discouraged ),'Value'] #LNS11300001: men, 16 and older
Participation_women_short = df.loc[(df['Series ID'] == 'LNS11300002') & (df['Date'] >= start_date_discouraged ),'Value'] #LNS11300001: women, 16 and older
Participation_16_19_short = df.loc[(df['Series ID'] == 'LNS11300012') & (df['Date'] >= start_date_discouraged ),'Value'] #'LNS11300012: all sex, between 16 and 19
dateList_short = df.loc[(df['Series ID'] == 'LNS11300000')&(df['Date'] >= start_date_discouraged),'Date']
#keep only the dates for which recession = 1:
recession_monthly_short = recession_monthly.loc[(recession_monthly['USRECQ'] == 1) & (recession_monthly['Date'] >= start_date_discouraged )]
#create a vector of recession dates:
recession_vector_monthly_short = recession_monthly_short['Date'].values
fig, ax1 = plt.subplots()
ax1.plot(dateList_short, Participation_rate_short, color='b')
ax1.plot(dateList_short, Participation_men_short, color='k')
ax1.plot(dateList_short, Participation_women_short, color='r')
ax1.plot(dateList_short, Participation_16_19_short, color='g')
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('Participation rate', color='k')
#add recession vertical lines:
for i in range(0,len(recession_vector_monthly_short)):
ax1.axvline(recession_vector_monthly_short[i], color='silver', linewidth = 6, zorder=0,alpha=0.7)
ax2 = ax1.twinx()
ax2.plot(dateList_short, share_discouraged_workers, color='teal', linestyle = '--', alpha=0.8)
ax2.set_ylabel('Discouraged workers share', color='teal')
ax1.legend(['All persons','Men','Women','All persons between 16 and 19'], loc='lower left' , fancybox = True, framealpha=0.5)
ax2.legend(['Discouraged workers share \n (in % of total labor force) '], loc='upper right' , fancybox = True, framealpha=0.5)
plt.savefig(path_figure + 'Participation_and_discouragement')
plt.show()
# Plot unemployed by category:
plt.plot(dateList, U_5, color = 'b')
plt.plot(dateList, U_15, '--k')
plt.plot(dateList, U_27, color = 'r')
plt.title('Unemployed by duration in the US')
plt.legend(['Unemployed for less than 5 weeks', 'Unemployed for 5-14 weeks', 'Unemployed for 27 Weeks and over'], loc='best', fancybox = True, framealpha=0.5)
plt.savefig(path_figure + 'Unemployed_by_category')
plt.show()
#######################################
# Construct Exit Rate from Unemployment
#######################################
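# Monthly job finding rate: F_t = 1 - (U_{t+1} - U^{<5w}_{t+1}) / U_t, i.e. the probability
# that a worker unemployed in month t is no longer unemployed in month t+1 (next month's
# short-duration unemployed are new inflows and are netted out).
# Monthly separation rate: S_t = U^{<5w}_{t+1} / E_t, the share of month-t employed who show
# up as short-duration unemployed in month t+1.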
F_BLS = []
dateList2 = []
for i in range(0,len(Year)-1):
F_BLS.append(1 - (Unemployed.values[i+1] - U_5.values[i+1])/Unemployed.values[i])
dateList2.append(date(Year[i], Month[i], 1))
S_BLS = []
for i in range(0,len(Year)-1):
S_BLS.append((U_5.values[i+1])/Employed.values[i])
plt.subplot(211)
plt.plot(dateList2, F_BLS, color = 'navy')
plt.fill_between(dateList2, 0, F_BLS, color='navy', alpha=0.2)
#plt.yscale('')
plt.title('Exit rate from unemployment')
plt.subplot(212)
plt.plot(dateList2, S_BLS, color = 'r')
plt.fill_between(dateList2, 0, S_BLS, color='r', alpha=0.2)
#plt.yscale('')
plt.title('Job destruction rate')
plt.savefig(path_figure + 'Turnover_rates_BLS')
plt.show()
#############################################################
# Construct Exit Rate from Unemployment for different workers:
#############################################################
# Number of unemployed workers with duration greater than 5:
U_5p = Unemployed.values - U_5.values
# Number of unemployed workers with duration greater than 15:
U_15p = Unemployed.values - U_5.values- U_15.values
U_27p = U_27.values
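# The loop below appears to assume a constant weekly exit hazard: if a share s of the
# unemployed has a spell longer than d weeks, then s is approximately exp(-f*d), so the
# weekly hazard is f = -log(s)/d and the factor 4 converts it into an approximate monthly rate.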
F_5 = []
F_15 = []
F_27 = []
for i in range(0,len(Year)):
F_5.append(-4*log(U_5p[i]/Unemployed.values[i])/5)
F_15.append(-4*log(U_15p[i]/Unemployed.values[i])/15)
F_27.append(-4*log(U_27p[i]/Unemployed.values[i])/27)
# Plot unemployed by duration:
plt.plot(dateList, F_5, color = 'b')
plt.plot(dateList, F_15, '--k')
plt.plot(dateList, F_27, color = 'r')
plt.title('Exit rate from unemployment by duration')
plt.legend(['More than 5 weeks', 'More than 15 weeks ', 'More than 27 Weeks'], loc='best', fancybox = True, framealpha=0.5)
plt.savefig(path_figure +'Exit_rate_by_duration')
plt.show()
# Unemploylent by diploma:
# Less than a high school diploma LNS14027659
U1 = df.loc[df['Series ID'] == 'LNS14027659','Value']/100
#High school diploma: LNS14027660
U2 = df.loc[df['Series ID'] == 'LNS14027660','Value']/100
#(Seas) Unemployment Rate - Some College or Associate Degree, 25 yrs. & over: LNS14027689
U3 = df.loc[df['Series ID'] == 'LNS14027689','Value']/100
# Bachelor's degree and Higher: LNS14027662
U4 = df.loc[df['Series ID'] == 'LNS14027662','Value']/100
# Date for education
dateListEdu = []
for i in range(0,len(U1)):
dateListEdu.append(date(df.loc[df['Series ID'] == 'LNS14027659','Year'].values[i],df.loc[df['Series ID'] == 'LNS14027659','Period'].values[i], 1))
# Plot unemployed by education:
plt.plot(dateListEdu, U1, color = 'b')
plt.plot(dateListEdu, U2, '--k')
plt.plot(dateListEdu, U3, color = 'r')
plt.plot(dateListEdu, U4, color = 'g')
#fill in between:
plt.fill_between(dateListEdu, U2, U1, color='b', alpha=0.2)
plt.fill_between(dateListEdu, U3, U2, color='k', alpha=0.2)
plt.fill_between(dateListEdu, U4, U3, color='r', alpha=0.2)
plt.fill_between(dateListEdu, 0, U4, color='g', alpha=0.2)
#plt.title('Unemployment educational attainment')
plt.legend(['Less than a High School Diploma', 'With a High School Diploma', 'Some College or Associate Degree','Bachelors Degree and Higher'], loc='best', fancybox = True, framealpha=0.5)
plt.savefig(path_figure + 'Unemployment_rate_by_education')
plt.show()
#Compute standard deviation of unemployment rate by education:
print(np.std(U1))
print(np.std(U2))
print(np.std(U3))
print(np.std(U4))
#Plot unemployment rate by education versus overall unemployment rate:
#keep the same years:
Unemployment_rate_selected_years = []
for i in range(0,len(Year)):
if date(Year[i], Month[i], 1) >= dateListEdu[0]: #keep the matching dates only
Unemployment_rate_selected_years.append(Unemployment_rate[i]/100) #divide by 100 to be consistent with the other series
degree_line = []
for i in range(0,len(Unemployment_rate_selected_years)):
degree_line.append(Unemployment_rate_selected_years[i])
plt.scatter(Unemployment_rate_selected_years, U1, color = 'b', alpha=0.5)
plt.scatter(Unemployment_rate_selected_years, U2, color = 'k', alpha=0.5)
plt.scatter(Unemployment_rate_selected_years, U3, color = 'r', alpha=0.5)
plt.scatter(Unemployment_rate_selected_years, U4, color = 'g', alpha=0.5)
#plt.scatter(Unemployment_rate_selected_years, degree_line, color = 'grey', alpha=0.2)
plt.legend(['Less than a High School Diploma', 'With a High School Diploma', 'Some College or Associate Degree','Bachelors Degree and Higher'], loc='best', fancybox = True, framealpha=0.5)
plt.xlabel('Overall unemployment rate')
plt.ylabel('Group specific unemployment rate')
plt.savefig(path_figure + 'Overall_vs_group_edu_u_rate')
plt.show()
#Calculate the variance of group specific unemployment levels
#and create a latek table:
table_std_group_unemployment = [['Std.', np.std(U1), np.std(U2), np.std( U3), np.std(U4)]]
print(tabulate(table_std_group_unemployment, headers=['', 'Less High School', 'High School D.', 'Some College or A. Degree', 'B. Degree and Higher'], floatfmt=".3f", tablefmt="latex"))
#save the table in a csv format:
with open(path_table + 'table_std_group_unemployment.csv', 'w') as csvfile:
writer = csv.writer(csvfile)
[writer.writerow(r) for r in table_std_group_unemployment]
#################################
#Data for productivity and wages:
#################################
#Load the data:
df2 = pd.read_csv("PRS.csv")
#1. Create date objects:
df2['Date'] = 0 #initialization
for i in range(0,len(df2['Series ID'])):
df2.ix[i,'Date'] = date(df2.ix[i,'Year'], df2.ix[i,'Month'], 1)
#2.keep the selected time period:
df2 = df2.loc[df2['Date'] <= stop_date]
df2 = df2.loc[df2['Date'] >= starting_date]
# 1. Seasonally adjusted real value added in the non farm business sector, Index 2009=100: PRS85006043
Real_output = df2.loc[df2['Series ID'] == 'PRS85006043','Value'].values
# 2. Nonfarm Business Sector: Real Compensation Per Hour, Index 2009=100, Quarterly, Seasonally Adjusted
Real_compensation_hour = df2.loc[df2['Series ID'] == 'COMPRNFB','Value'].values
dateList3 = []
for i in range(0,len(Real_output)):
dateList3.append(date(df2['Year'].values[i],df2['Month'].values[i], 1))
plt.plot(dateList3, Real_output , color = 'b')
plt.plot(dateList3, Real_compensation_hour, color = 'r')
plt.title('Real output and compensation in the non-farm business sector')
plt.legend(['Real output per person', 'Real compensation per hour'], loc='best', fancybox = True, framealpha=0.5)
plt.savefig(path_figure + 'Real_value_added_and_wages_raw_data')
plt.show()
#####################
# Data for vacancies
#####################
# Merge the help wanted index and JOLTS on data vacancies
# Based on the methodology from Lise and Robin (2014)
###############################################
# A. Load the vacancies data and do simple plot
###############################################
path_data = '/home/julien/Documents/COURS/5A/MASTER THESIS/Labor Market/Data/Data/'
#Help Wanted Index 1951
#Source : The Conference Board
#downloaded from http://www.econometricsociety.org/ecta/Supmat/9070_data and programs.zip.
df_HWI = pd.read_stata(path_data + 'help_raw_m.dta')
#Adjust the units:
for i in range(0,len(df_HWI)):
df_HWI.ix[i,'V'] = df_HWI.ix[i,'V']*100 #results in thousands vacancies
#JOLTS:
#source: http://data.bls.gov/
df_JOLTS = pd.read_csv(path_data + 'JOLTS.csv')
df_JOLTS = df_JOLTS.loc[df_JOLTS['Series ID'] == 'JTS00000000JOL'] #Keep only data on vacancy creation in the non farm sectors in thousands
#adjust the indexes:
new_indexes = []
for i in range(0,len(df_JOLTS)):
new_indexes.append(i)
df_JOLTS = df_JOLTS.set_index([new_indexes])
#Create date objects for both series;
df_JOLTS['Date'] = 0
for i in range(0,len(df_JOLTS['Series ID'])):
df_JOLTS.ix[i,'Date'] = date(df_JOLTS.ix[i,'Year'], df_JOLTS.ix[i,'Month'], 1)
df_HWI['Date'] = 0
for i in range(0,len(df_HWI)):
df_HWI.ix[i,'Date'] = date(df_HWI.ix[i,'year'], df_HWI.ix[i,'month'], 1)
dateList_HWI = []
Vacancies_HWI = []
for i in range(0,len(df_HWI)):
dateList_HWI.append(date(df_HWI.ix[i, 'year'],df_HWI.ix[i, 'month'], 1))
Vacancies_HWI.append(df_HWI.ix[i,'V'])
dateList_JOLTS = []
Vacancies_JOLTS = []
for i in range(0,len(df_JOLTS)):
dateList_JOLTS.append(date(df_JOLTS .ix[i, 'Year'],df_JOLTS.ix[i, 'Month'], 1))
Vacancies_JOLTS.append(df_JOLTS.ix[i,'Value'])
plt.plot(dateList_JOLTS, Vacancies_JOLTS, color = 'r')
plt.plot(dateList_HWI, Vacancies_HWI, color = 'b')
plt.legend(['JOLTS','HWI'])
plt.show()
#########################
# B. Merge the two series
#########################
###############################################################
# A. Select the overlap period for both series: 12-2000, 7-2006
starting_year_overlap = 2000 #first quarter 1951
starting_month_overlap = 12
starting_date_overlap = date(starting_year_overlap, starting_month_overlap, 1)
stop_year_overlap = 2006 #third quarter 2010
stop_month_overlap = 7
stop_date_overlap = date(stop_year_overlap, stop_month_overlap, 1)
df_HWI_overlap = df_HWI.loc[(df_HWI['Date'] <= stop_date_overlap) & (df_HWI['Date'] >= starting_date_overlap)]
df_JOLTS_overlap = df_JOLTS.loc[(df_JOLTS['Date'] <= stop_date_overlap) & (df_JOLTS['Date'] >= starting_date_overlap)]
#should be of same length:
assert len(df_JOLTS_overlap) == len(df_HWI_overlap)
#Adjust indexes for both dataframes:
new_indexes = []
for i in range(0,len(df_JOLTS_overlap)):
new_indexes.append(i)
df_JOLTS_overlap = df_JOLTS_overlap.set_index([new_indexes])
df_HWI_overlap = df_HWI_overlap .set_index([new_indexes])
#Merge both
df_ovelap_merged = pd.merge(df_HWI_overlap , df_JOLTS_overlap, on='Date')
plt.plot(df_JOLTS_overlap['Value'], color = 'r')
plt.plot(df_HWI_overlap['V'], color = 'b')
plt.legend(['JOLTS','HWI'])
plt.show()
##########################################
# B. Project the HWI into the JOLTS space:
# 'V': HWI
# 'Value': JOLTS
result_projection = smm.ols(formula="V ~ Value", data=df_ovelap_merged).fit() #simple OLS
print(result_projection.summary())
intercept = result_projection.params[0]
beta = result_projection.params[1]
####################################################
# C. Predict the HWI values based on observed JOLTS:
#merge the complete HWI and JOLTS:
df_total_merged = pd.merge(df_HWI, df_JOLTS, how = 'outer', on=['Date'])
#Predict:
for i in range(0,len(df_total_merged)):
if df_total_merged.ix[i,'Date'] >= starting_date_overlap: #Start predicting only in 12-2000
df_total_merged.ix[i,'V'] = intercept + beta*df_total_merged.ix[i,'Value'] #Project on the JOLTS space
df_total_merged.ix[i,'year'] = df_total_merged.ix[i,'Year']
df_total_merged.ix[i,'month'] = df_total_merged.ix[i,'Month_y']
# Plot predicted and actual values
plt.plot(df_total_merged['Value'], color = 'r')
plt.plot(df_total_merged['V'], color = 'b')
plt.legend(['JOLTS','HWI predicted'])
plt.show() #Visually check it makes sense
# Plot with only the predicted
#plt.plot(df_total_merged['V'], color = 'b')
plt.fill_between(df_total_merged['Date'].values, 0, df_total_merged['V'].values, color='k', alpha=0.4)
plt.legend(['Vacancies - thousands'])
plt.savefig(path_figure + 'Number_vacancies_monthly')
plt.show()
########################################
# Clean the merged dataframe and save it
#Change column names
df_total_merged=df_total_merged.rename(columns = {'V':'Vacancies_HWI_hat'}) #"hat" because starting in 2000, it was projected on the JOLTS space
df_total_merged=df_total_merged.rename(columns = {'Value':'Vacancies_JOLTS'}) #actual data from JOLTS
df_total_merged=df_total_merged.rename(columns = {'Month_x':'Month'}) #actual data from JOLTS
#Drop some useless columns:
df_total_merged = df_total_merged.drop('Series ID', 1)
df_total_merged = df_total_merged.drop('Year', 1)
df_total_merged = df_total_merged.drop('Period', 1)
df_total_merged = df_total_merged.drop('Month_y', 1)
df_total_merged = df_total_merged.drop('Description', 1)
df_total_merged.to_csv(path_table +'HWI_JOLTS_merged.csv')
# keep the same time frame as the other dataframes for coherence:
df_total_merged = df_total_merged.loc[df_total_merged['Date'] <= stop_date]
df_total_merged = df_total_merged.loc[df_total_merged['Date'] >= starting_date]
#####################
# HP Filter the Data:
#####################
################################################
# 0. Construct quarterly series for monthly data:
################################################
# Unemployment rate:
Unemployment_rate_quarterly = []
dateList4 = []
upper_index = math.floor(len(Unemployment_rate)/3)
a = 0
for i in range(0,upper_index):
#compute the mean for the quarter
    Unemployment_rate_quarterly.append((Unemployment_rate[a]+Unemployment_rate[a+1]+Unemployment_rate[a+2])/3)
dateList4.append(dateList[a])
a = a + 3
#Plot monthly vers quarterly to visually check the results:
#plt.plot(dateList, Unemployment_rate , color = 'b')
#plt.plot(dateList4, Unemployment_rate_quarterly, color = 'r')
#plt.show()
# Vacancies (in thousands):
Vacancies = []
for i in range(0,len(df_total_merged)):
Vacancies.append(df_total_merged.ix[i,'Vacancies_HWI_hat']) #monthly vacancies in thousands
Vacancies_quarterly = [] #initialize quarterly observations
upper_indexV = math.floor(len(Vacancies)/3)
a = 0
for i in range(0,upper_indexV):
#compute the mean for the quarter
    Vacancies_quarterly.append((Vacancies[a]+Vacancies[a+1]+Vacancies[a+2])/3)
a = a + 3
#Plot monthly vers quarterly to visually check the results:
plt.plot(dateList, Vacancies, color = 'b')
plt.plot(dateList4, Vacancies_quarterly, color = 'r')
plt.show()
#Scatter plot quarterly vacancies unemployment ratio:
plt.scatter(Unemployment_rate_quarterly, Vacancies_quarterly, color = 'b', alpha=0.5)
plt.xlabel('Unemployment rate %')
plt.ylabel('Vacancies in thousands')
plt.show()
########################################################
# 1st approach: take monthly average, as in Shimer 2005:
"""
# Exit rate from unemployment F_BLS:
F_BLS_quarterly = []
dateList5 = []
a = 0
for i in range(0,upper_index):
#compute the mean for the quarter
F_BLS_quarterly.append((F_BLS[a]+F_BLS[a+1]+F_BLS[a+2]+F_BLS[a+3])/4)
dateList5.append(dateList2[a])
a = a + 3
# job destruction rate S_BLS:
S_BLS_quarterly = []
a = 0
for i in range(0,upper_index):
#compute the mean for the quarter
S_BLS_quarterly.append((S_BLS[a]+S_BLS[a+1]+S_BLS[a+2]+S_BLS[a+3])/4)
a = a + 3
"""
#############################################################
# 2nd approach: see Robin 2011
# Iteration of the monthly series to construct quarterly ones:
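# The quarterly rates below chain three consecutive monthly transitions of the two-state
# (employed/unemployed) process: a quarterly separation is recorded when a separation occurs
# in some month of the quarter and no further transition follows within that quarter, and the
# quarterly job finding rate is then recovered residually from the probability of making no
# transition in any of the three months (Robin 2011, p. 1339).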
S_BLS_quarterly = []
F_BLS_quarterly = []
dateList5 = []
a = 0
for i in range(0,upper_index):
F2_S = S_BLS[a+2]
F2_F = F_BLS[a+2]
F_S = S_BLS[a+1]
F_F = F_BLS[a+1]
S = S_BLS[a]
F = F_BLS[a]
# Quarterly job destruction rate S_BLS:
S_BLS_quarterly.append(F2_S + (1 - F2_S - F2_F)*(F_S + (1 - F_S - F_F)*S)) #formula p1339
dateList5.append(dateList2[a]) #store date
# Quarterly job finding rate:
F_BLS_quarterly.append(1 - S_BLS_quarterly[i] - (1 - S - F)*(1 - F_S - F_F)*(1 - F2_S - F2_F)) #formula p1339
a = a + 3
# Plot this 2nd approach:
plt.subplot(2, 1, 1)
plt.plot(dateList5, F_BLS_quarterly, color = 'navy')
plt.fill_between(dateList5, 0, F_BLS_quarterly, color='navy', alpha=0.2)
plt.title('Quarterly job finding rate')
plt.subplot(2, 1, 2)
plt.plot(dateList5, S_BLS_quarterly, color = 'r')
plt.fill_between(dateList5, 0, S_BLS_quarterly, color='R', alpha=0.2)
plt.title('Quarterly job destruction rate')
plt.savefig(path_figure + 'Quarterly_job_finding_and_job_destruction_rates')
plt.show()
#############################################
# 1. log transformation of the quarterly data:
#############################################
log_Real_output = np.log(Real_output)
log_Real_compensation_hour = np.log(Real_compensation_hour)
log_Unemployment_rate = np.log(Unemployment_rate_quarterly) #Quarterly unemployment rate
log_F_BLS = np.log(F_BLS_quarterly)
log_S_BLS = np.log(S_BLS_quarterly)
log_Vacancies = np.log(Vacancies_quarterly) #Quarterly vacancies in thousands
###############
# 2. HP filter
###############
# Choose the smoothing parameter:
smoothing_parameter = 2.5*math.pow(10,5)
#smoothing_parameter = 1*math.pow(10,5) # as in Shimer 2005
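# For reference: the textbook HP smoothing parameter for quarterly data is 1600. The much
# larger values used here make the fitted trend smoother, so slower-moving fluctuations
# remain in the cyclical component.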
# a. Real value added:
cycle_Real_output, trend_Real_output = sm.tsa.filters.hpfilter(log_Real_output , smoothing_parameter)
# b. Real productivity per hour:
cycle_Real_compensation_hour, trend_Real_compensation_hour = sm.tsa.filters.hpfilter(log_Real_compensation_hour, smoothing_parameter)
# c. Unemployment rate:
cycle_Unemployment_rate, trend_Unemployment_rate = sm.tsa.filters.hpfilter(log_Unemployment_rate, smoothing_parameter)
# d. Job destruction rate S_BLS
cycle_S_BLS, trend_S_BLS = sm.tsa.filters.hpfilter(log_S_BLS, smoothing_parameter)
# e. Exit rate from unemployment F_BLS
cycle_F_BLS, trend_F_BLS = sm.tsa.filters.hpfilter(log_F_BLS, smoothing_parameter)
# f. Vacancies:
cycle_Vacancies, trend_Vacancies = sm.tsa.filters.hpfilter(log_Vacancies , smoothing_parameter)
#####################################
# 3. Exponentiate the detrended data:
#####################################
exponentiated_cycle_Real_output = np.exp(cycle_Real_output)
exponentiated_cycle_Real_compensation_hour = np.exp(cycle_Real_compensation_hour)
exponentiated_cycle_Unemployment_rate = np.exp(cycle_Unemployment_rate)
exponentiated_cycle_F_BLS = np.exp(cycle_F_BLS)
exponentiated_cycle_S_BLS = np.exp(cycle_S_BLS)
exponentiated_cycle_Vacancies = np.exp(cycle_Vacancies)
##########################################
# 4.1 Plots the trend and cycle components:
##########################################
#####################
# a. Real Output:
plt.subplot(2, 2, 1)
plt.plot(dateList3, log_Real_output, color="navy")
plt.title("Log Real Output")
plt.subplot(2, 2, 2)
plt.plot(dateList3, trend_Real_output, color="r",)
plt.title("Trend Component")
plt.subplot(2, 2, 3)
plt.plot(dateList3, cycle_Real_output, color="g")
plt.title("Cycle Component")
plt.subplot(2, 2, 4)
plt.plot(dateList3, exponentiated_cycle_Real_output, color="k")
plt.title("Exponentiated Cycle Component")
plt.savefig(path_figure + 'Detrend_output')
plt.show()
##############################
#b. Real productivity per hour:
plt.subplot(2, 2, 1)
plt.plot(dateList3, log_Real_compensation_hour, color="navy")
plt.title("Log Real Compensation per hour")
plt.subplot(2, 2, 2)
plt.plot(dateList3, trend_Real_compensation_hour, color="r",)
plt.title("Trend Component")
plt.subplot(2, 2, 3)
plt.plot(dateList3, cycle_Real_compensation_hour, color="g")
plt.title("Cycle Component")
plt.subplot(2, 2, 4)
plt.plot(dateList3, exponentiated_cycle_Real_compensation_hour, color="k")
plt.title("Exponentiated Cycle Component")
plt.show()
######################
#c. Unemployment rate:
plt.subplot(2, 2, 1)
plt.plot(dateList4, log_Unemployment_rate, color="navy")
plt.title("Log Quarterly Unemployment rate")
plt.subplot(2, 2, 2)
plt.plot(dateList4, trend_Unemployment_rate, color="r",)
plt.title("Trend Component")
plt.subplot(2, 2, 3)
plt.plot(dateList4, cycle_Unemployment_rate, color="g")
plt.title("Cycle Component")
plt.subplot(2, 2, 4)
plt.plot(dateList4, exponentiated_cycle_Unemployment_rate, color="k")
plt.title("Exponentiated Cycle Component")
plt.show()
#######################
# d. Job finding rate:
plt.subplot(2, 2, 1)
plt.plot(dateList5, log_F_BLS, color="navy")
plt.title("Log Quarterly Job Finding Rate")
plt.subplot(2, 2, 2)
plt.plot(dateList5, trend_F_BLS, color="r",)
plt.title("Trend Component")
plt.subplot(2, 2, 3)
plt.plot(dateList5, cycle_F_BLS, color="g")
plt.title("Cycle Component")
plt.subplot(2, 2, 4)
plt.plot(dateList5, exponentiated_cycle_F_BLS, color="k")
plt.title("Exponentiated Cycle Component")
plt.show()
#########################
# d. Job destruction rate:
plt.subplot(2, 2, 1)
plt.plot(dateList5, log_S_BLS, color="navy")
plt.title("Log Quarterly Job Destruction Rate")
plt.subplot(2, 2, 2)
plt.plot(dateList5, trend_S_BLS, color="r",)
plt.title("Trend Component")
plt.subplot(2, 2, 3)
plt.plot(dateList5, cycle_S_BLS, color="g")
plt.title("Cycle Component")
plt.subplot(2, 2, 4)
plt.plot(dateList5, exponentiated_cycle_S_BLS, color="k")
plt.title("Exponentiated Cycle Component")
plt.show()
###############
# e. Vacancies
plt.subplot(2, 2, 1)
plt.plot(dateList4, log_Vacancies, color="navy")
plt.title("Log Number of Vacancies in Thousands")
plt.subplot(2, 2, 2)
plt.plot(dateList4, trend_Vacancies, color="r",)
plt.title("Trend Component")
plt.subplot(2, 2, 3)
plt.plot(dateList4, cycle_Vacancies, color="g")
plt.title("Cycle Component")
plt.subplot(2, 2, 4)
plt.plot(dateList4, exponentiated_cycle_Vacancies, color="k")
plt.title("Exponentiated Cycle Component")
plt.show()
##########################################################################
#4.2 Plot the comovements of the Business cycle components with Real output
##########################################################################
conv = np.vectorize(mdates.strpdate2num('%Y-%m-%d')) #used to for plotting vertical lines
# load the quarterly recession data:
# source: https://research.stlouisfed.org/fred2
# NBER based Recession Indicators for the United States from the Period following the Peak through the Trough, +1 or 0, Quarterly, Not Seasonally Adjusted
recession = pd.read_csv("quarterly_recession_indicator.csv") #1 = recession
# keep only the dates inside the good interval:
# Create a column with dates objects:
recession['Date'] = 0
for i in range(0,len(recession['observation_date'])):
recession.ix[i,'Date'] = date(recession.ix[i,'Year'], recession.ix[i,'Month'], 1)
#keep the selected time period:
recession = recession.loc[recession['Date'] <= stop_date]
recession = recession.loc[recession['Date'] >= starting_date]
#keep only the dates for which recession = 1:
recession = recession.loc[recession['USRECQ'] == 1]
recession['Date'].values
#create a vector of recession dates:
recession_vector = recession['Date'].values
#########
#a. Wage:
plt.subplot(1, 1, 1)
plt.plot(dateList3, exponentiated_cycle_Real_output, color = 'k', linestyle = '--')
plt.plot(dateList3, exponentiated_cycle_Real_compensation_hour, color = 'navy')
plt.fill_between(dateList3, 1, exponentiated_cycle_Real_output, color='k',alpha=0.4)
plt.fill_between(dateList3, 1, exponentiated_cycle_Real_compensation_hour, color='navy', alpha=0.1)
#add recession vertical lines:
for i in range(0,len(recession_vector)):
plt.axvline(recession_vector[i], color='silver', linewidth = 2, zorder=0)
plt.legend(['Real output', 'Real compensation per hour'], loc='best', fancybox = True, framealpha=0.7)
plt.savefig(path_figure + 'Cycle_wages_output')
plt.show()
#######################
#b. Unemployment rate:
plt.subplot(1, 1, 1)
plt.plot(dateList3, exponentiated_cycle_Real_output, color = 'k', linestyle = '--')
plt.plot(dateList4, exponentiated_cycle_Unemployment_rate, color = 'navy')
plt.fill_between(dateList3, 1, exponentiated_cycle_Real_output, color='k', alpha=0.4)
plt.fill_between(dateList4, 1, exponentiated_cycle_Unemployment_rate, color='navy', alpha=0.1)
#add recession vertical lines:
for i in range(0,len(recession_vector)):
plt.axvline(recession_vector[i], color='silver', linewidth = 2, zorder=0)
plt.legend(['Real output', 'Unemployment rate'], loc='best', fancybox = True, framealpha=0.7)
plt.savefig(path_figure + 'Cycle_unemployment_output')
plt.show()
#####################
#C. Job finding rate:
plt.subplot(1, 1, 1)
plt.plot(dateList3, exponentiated_cycle_Real_output, color = 'k', linestyle = '--')
plt.plot(dateList5, exponentiated_cycle_F_BLS, color = 'navy')
plt.fill_between(dateList3, 1, exponentiated_cycle_Real_output, color='k', alpha=0.4)
plt.fill_between(dateList5, 1, exponentiated_cycle_F_BLS, color='navy', alpha=0.1)
#add recession vertical lines:
for i in range(0,len(recession_vector)):
plt.axvline(recession_vector[i], color='silver', linewidth = 2, zorder=0)
plt.legend(['Real output', 'Job finding rate'], loc='best', fancybox = True, framealpha=0.7)
plt.savefig(path_figure + 'Cycle_job_finding_rate_output')
plt.show()
#########################
#D. Job destruction rate:
plt.subplot(1, 1, 1)
plt.plot(dateList3, exponentiated_cycle_Real_output, color = 'k', linestyle = '--')
plt.plot(dateList5, exponentiated_cycle_S_BLS, color = 'navy')
plt.fill_between(dateList3, 1, exponentiated_cycle_Real_output, color='k', alpha=0.4)
plt.fill_between(dateList5, 1, exponentiated_cycle_S_BLS, color='navy', alpha=0.1)
#add recession vertical lines:
for i in range(0,len(recession_vector)):
plt.axvline(recession_vector[i], color='silver', linewidth = 2, zorder=0)
plt.legend(['Real output', 'Job destruction rate'], loc='best', fancybox = True, framealpha=0.7)
plt.savefig(path_figure + 'Cycle_job_destruction_rate_output')
plt.show()
##############
# E. Vacancies
plt.subplot(1, 1, 1)
plt.plot(dateList3, exponentiated_cycle_Real_output, color = 'k', linestyle = '--')
plt.plot(dateList4, exponentiated_cycle_Vacancies, color = 'navy')
plt.fill_between(dateList3, 1, exponentiated_cycle_Real_output, color='k', alpha=0.4)
plt.fill_between(dateList4, 1, exponentiated_cycle_Vacancies, color='navy', alpha=0.1)
#add recession vertical lines:
for i in range(0,len(recession_vector)):
plt.axvline(recession_vector[i], color='silver', linewidth = 2, zorder=0)
plt.legend(['Real output', 'Vacancies'], loc='best', fancybox = True, framealpha=0.7)
plt.savefig(path_figure + 'Cycle_vacancies_output')
plt.show()
#################
# BEVERIDGE CURVE
plt.scatter(cycle_Unemployment_rate, cycle_Vacancies, color = 'b', alpha=0.5)
plt.xlabel('Unemployment')
plt.ylabel('Vacancies')
plt.savefig(path_figure + 'Beveridge_curve')
plt.show()
#######################
# 5. Compute some stats
#######################
#Mean:
mean_data = []
mean_data.append('') #productivity, normalized to 1
mean_data.append(np.mean(Unemployment_rate_quarterly)/100)#quarterly unemployment rate
mean_data.append(np.mean(F_BLS_quarterly)) #quarterly exit rate from unemployment
mean_data.append(np.mean(S_BLS_quarterly)) #quarterly job destruction rate
mean_data.append('') #quarterly vacancies created - in thousands
mean_data.append('') #wage, normalized to 1
#Standard deviation of quarterly observations:
std_data = []
std_data.append(np.std(cycle_Real_output))
std_data.append(np.std(cycle_Unemployment_rate))
std_data.append(np.std(cycle_F_BLS))
std_data.append(np.std(cycle_S_BLS))
std_data.append(np.std(cycle_Vacancies))
std_data.append(np.std(cycle_Real_compensation_hour))
#Skewness:
skew_data = []
skew_data.append(scipy.stats.skew(cycle_Real_output))
skew_data.append(scipy.stats.skew(cycle_Unemployment_rate))
skew_data.append(scipy.stats.skew(cycle_F_BLS))
skew_data.append(scipy.stats.skew(cycle_S_BLS))
skew_data.append(scipy.stats.skew(cycle_Vacancies))
skew_data.append(scipy.stats.skew(cycle_Real_compensation_hour))
#Kurtosis, Normal = 3
kurtosis_data = []
kurtosis_data.append(scipy.stats.kurtosis(cycle_Real_output, fisher=False))
kurtosis_data.append(scipy.stats.kurtosis(cycle_Unemployment_rate, fisher=False))
kurtosis_data.append(scipy.stats.kurtosis(cycle_F_BLS, fisher=False))
kurtosis_data.append(scipy.stats.kurtosis(cycle_S_BLS, fisher=False))
kurtosis_data.append(scipy.stats.kurtosis(cycle_Vacancies, fisher=False))
kurtosis_data.append(scipy.stats.kurtosis(cycle_Real_compensation_hour, fisher=False))
#Autocorrelation:
autocorrelation_data = []
autocorrelation_data.append(estimated_autocorrelation(cycle_Real_output)[1])
autocorrelation_data.append(estimated_autocorrelation(cycle_Unemployment_rate)[1])
autocorrelation_data.append(estimated_autocorrelation(cycle_F_BLS)[1])
autocorrelation_data.append(estimated_autocorrelation(cycle_S_BLS)[1])
autocorrelation_data.append(estimated_autocorrelation(cycle_Vacancies)[1])
autocorrelation_data.append(estimated_autocorrelation(cycle_Real_compensation_hour)[1])
#Correlation with production:
corr_prod_data = []
corr_prod_data.append(np.corrcoef(cycle_Real_output, cycle_Real_output)[0,1])
corr_prod_data.append(np.corrcoef(cycle_Unemployment_rate, cycle_Real_output[0:(len(cycle_Real_output)-1)])[0,1])#drop the last quarter for output so vectors have the same length
corr_prod_data.append(np.corrcoef(cycle_F_BLS, cycle_Real_output[0:(len(cycle_Real_output)-1)])[0,1]) #drop the last quarter for output so vectors have the same length
corr_prod_data.append(np.corrcoef(cycle_S_BLS, cycle_Real_output[0:(len(cycle_Real_output)-1)])[0,1]) #drop the last quarter for output so vectors have the same length
corr_prod_data.append(np.corrcoef(cycle_Vacancies, cycle_Real_output[0:(len(cycle_Real_output)-1)])[0,1])
corr_prod_data.append(np.corrcoef(cycle_Real_compensation_hour, cycle_Real_output)[0,1])
#Correlation with unemployment:
corr_unemployment_data = []
corr_unemployment_data.append(np.corrcoef(cycle_Real_output[0:(len(cycle_Real_output)-1)], cycle_Unemployment_rate)[0,1]) #drop the last quarter for output so vectors have the same length
corr_unemployment_data.append(np.corrcoef(cycle_Unemployment_rate, cycle_Unemployment_rate)[0,1])
corr_unemployment_data.append(np.corrcoef(cycle_F_BLS, cycle_Unemployment_rate)[0,1])
corr_unemployment_data.append(np.corrcoef(cycle_S_BLS, cycle_Unemployment_rate)[0,1])
corr_unemployment_data.append(np.corrcoef(cycle_Vacancies, cycle_Unemployment_rate)[0,1])
corr_unemployment_data.append(np.corrcoef(cycle_Real_compensation_hour[0:(len(cycle_Real_compensation_hour)-1)], cycle_Unemployment_rate)[0,1])
# Create a table:
table = [['Mean', mean_data[0], mean_data[1], mean_data[2], mean_data[3], mean_data[4], mean_data[5]],
['Std', std_data[0], std_data[1], std_data[2], std_data[3], std_data[4], std_data[5]],
['Skewness', skew_data[0], skew_data[1], skew_data[2], skew_data[3], skew_data[4], skew_data[5]],
['Kurtosis', kurtosis_data[0], kurtosis_data[1], kurtosis_data[2], kurtosis_data[3], kurtosis_data[4], kurtosis_data[5]],
['Autocorrelation', autocorrelation_data[0], autocorrelation_data[1], autocorrelation_data[2], autocorrelation_data[3], autocorrelation_data[4], autocorrelation_data[5]],
['Corr. with production', corr_prod_data[0], corr_prod_data[1], corr_prod_data[2], corr_prod_data[3], corr_prod_data[4], corr_prod_data[5]],
['Corr. with unemployment', corr_unemployment_data[0], corr_unemployment_data[1], corr_unemployment_data[2], corr_unemployment_data[3], corr_unemployment_data[4], corr_unemployment_data[5]]]
#print the table
print(tabulate(table, headers=['', 'Productivity', 'Unemployment rate', 'Exit rate from Un.', 'Job destruction rate', 'Vacancies', 'Wage'], floatfmt=".3f", tablefmt="latex"))
#save the table in a csv format:
with open(path_table + 'table_moments.csv', 'w') as csvfile:
writer = csv.writer(csvfile)
[writer.writerow(r) for r in table]
##############################
# ARCH Model for productivity
#am = arch_model(exponentiated_cycle_Real_output)
#res = am.fit() | mit |
shogun-toolbox/shogun | examples/undocumented/python/graphical/inverse_covariance_estimation_demo.py | 1 | 2383 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from pylab import show, imshow
import shogun as sg
def simulate_data (n,p):
#create a random pxp covariance matrix
cov = np.random.normal(size=(p,p))
#generate data set with multivariate Gaussian distribution
mean = [0] * p
data = np.random.multivariate_normal(mean, cov, n)
return data
def inverse_covariance (data,lc):
sic = sg.SparseInverseCovariance()
#by default cov() expects each row to represent a variable, with observations in the columns
cov = np.cov(data.T)
max_cov = cov.max()
min_cov = cov.min()
    #compute the inverse covariance matrix
Si = sic.estimate(cov,lc)
return Si
def draw_graph(sic, subplot):
import networkx as nx
#create list of edges
#an egde means there is a dependency between variables
#0 value in sic matrix mean independent variables given all the other variables
p = sic.shape[0]
X, Y = np.meshgrid(range(p), range(p))
graph = np.array((X[sic != 0], Y[sic != 0])).T
# extract nodes from graph
nodes = set([n1 for n1, n2 in graph] + [n2 for n1, n2 in graph])
# create networkx graph
G=nx.Graph()
# add nodes
for node in nodes:
G.add_node(node)
# add edges
for edge in graph:
G.add_edge(edge[0], edge[1])
# draw graph
nx.draw(G, ax=subplot)
    # return the list of edges
return graph
if __name__=='__main__':
#edit here for your own simulation
num_observations = 100
num_variables = 11
penalties = [0.00001, 0.05, 0.1, 0.5, 1, 2]
columns = len(penalties)
#plot the heat map and the graphs of dependency between variables
#for different penaltiy values
f, axarr = plt.subplots(2, columns)
f.suptitle('Inverse Covariance Estimation\nfor ' +str(num_variables)+' variables and '+str(num_observations)+' observations', fontsize=20)
data = simulate_data(num_observations, num_variables)
    print(data.shape)
column = -1
for p in penalties:
column = column + 1
sic = inverse_covariance (data,p)
i = axarr[0, column].imshow(sic, cmap="hot", interpolation='nearest')
axarr[0, column].set_title('penalty='+str(p), fontsize=10)
graph = draw_graph(sic, plt.subplot(2, columns, column + columns + 1))
        axarr[1, column].set_title(str((len(graph) - num_variables)/2) + ' dependencies', fontsize=10)
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([0.85, 0.15, 0.05, 0.7])
f.colorbar(i, cax=cbar_ax)
show()
| bsd-3-clause |
marqh/cartopy | lib/cartopy/examples/tissot.py | 1 | 1582 | # (C) British Crown Copyright 2011 - 2012, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <http://www.gnu.org/licenses/>.
"""
This example demonstrates the way a box is warped when it is defined in a rotated space.
Note that the box contains the north pole, which adds extra complexity to the underlying transformation.
"""
# XXX Needs Geodetic functionality from Basemap to work. Consider using http://geographiclib.sourceforge.net
import matplotlib.pyplot as plt
import cartopy
import numpy
def main():
pc = cartopy.prj.PlateCarree()
ax = plt.axes(projection=cartopy.prj.Mercator())
ax.coastlines()
# g = ax.projection.as_geodetic()
radius = 15 # degrees
for lat in numpy.arange(-80, 90, 20):
for lon in numpy.arange(-160, 180, 40):
ax.geod_circle_meters(lon, lat, 500e3, facecolor='blue', alpha=0.7)
plt.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
aljoscha/flink | flink-python/pyflink/fn_execution/beam/beam_coder_impl_slow.py | 1 | 26845 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import decimal
import pickle
import struct
from typing import Any, Tuple
from typing import List
import pyarrow as pa
from apache_beam.coders.coder_impl import StreamCoderImpl, create_InputStream, create_OutputStream
from pyflink.fn_execution.ResettableIO import ResettableIO
from pyflink.common import Row, RowKind
from pyflink.table.utils import pandas_to_arrow, arrow_to_pandas
ROW_KIND_BIT_SIZE = 2
class FlattenRowCoderImpl(StreamCoderImpl):
def __init__(self, field_coders):
self._field_coders = field_coders
self._field_count = len(field_coders)
# the row kind uses the first 2 bits of the bitmap, the remaining bits are used for null
# mask, for more details refer to:
# https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/RowSerializer.java
self._leading_complete_bytes_num = (self._field_count + ROW_KIND_BIT_SIZE) // 8
self._remaining_bits_num = (self._field_count + ROW_KIND_BIT_SIZE) % 8
self.null_mask_search_table = self.generate_null_mask_search_table()
self.null_byte_search_table = (0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01)
self.row_kind_search_table = [0x00, 0x80, 0x40, 0xC0]
self.data_out_stream = create_OutputStream()
@staticmethod
def generate_null_mask_search_table():
"""
Each bit of one byte represents whether the column at the corresponding position is None or not,
e.g. 0x84 represents the first column and the sixth column are None.
"""
null_mask = []
for b in range(256):
every_num_null_mask = [(b & 0x80) > 0, (b & 0x40) > 0, (b & 0x20) > 0, (b & 0x10) > 0,
(b & 0x08) > 0, (b & 0x04) > 0, (b & 0x02) > 0, (b & 0x01) > 0]
null_mask.append(tuple(every_num_null_mask))
return tuple(null_mask)
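# A small worked example (not part of the original pipeline; the values follow from the
# bit tests above): the entry for byte 0x84 == 0b10000100 flags positions 0 and 5, i.e.
# generate_null_mask_search_table()[0x84] ==
# (True, False, False, False, False, True, False, False),
# which matches the "first column and the sixth column are None" case in the docstring.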
def encode_to_stream(self, value, out_stream, nested):
field_coders = self._field_coders
data_out_stream = self.data_out_stream
self._write_mask(value, data_out_stream)
for i in range(self._field_count):
item = value[i]
if item is not None:
field_coders[i].encode_to_stream(item, data_out_stream, nested)
out_stream.write_var_int64(data_out_stream.size())
out_stream.write(data_out_stream.get())
data_out_stream._clear()
def encode_nested(self, value: List):
data_out_stream = self.data_out_stream
self._encode_one_row_to_stream(value, data_out_stream, True)
result = data_out_stream.get()
data_out_stream._clear()
return result
def decode_from_stream(self, in_stream, nested):
while in_stream.size() > 0:
in_stream.read_var_int64()
yield self._decode_one_row_from_stream(in_stream, nested)[1]
def _encode_one_row_to_stream(self, value, out_stream, nested):
field_coders = self._field_coders
if isinstance(value, Row):
self._write_mask(value, out_stream, value.get_row_kind().value)
else:
self._write_mask(value, out_stream)
for i in range(self._field_count):
item = value[i]
if item is not None:
field_coders[i].encode_to_stream(item, out_stream, nested)
def _decode_one_row_from_stream(
self, in_stream: create_InputStream, nested: bool) -> Tuple[int, List]:
row_kind_and_null_mask = self._read_mask(in_stream)
row_kind_value = 0
for i in range(ROW_KIND_BIT_SIZE):
row_kind_value += int(row_kind_and_null_mask[i]) * 2 ** i
return row_kind_value, [None if row_kind_and_null_mask[idx + ROW_KIND_BIT_SIZE] else
self._field_coders[idx].decode_from_stream(
in_stream, nested) for idx in range(0, self._field_count)]
def _write_mask(self, value, out_stream, row_kind_value=0):
field_pos = 0
null_byte_search_table = self.null_byte_search_table
remaining_bits_num = self._remaining_bits_num
# first byte contains the row kind bits
b = self.row_kind_search_table[row_kind_value]
for i in range(0, 8 - ROW_KIND_BIT_SIZE):
if field_pos + i < len(value) and value[field_pos + i] is None:
b |= null_byte_search_table[i + ROW_KIND_BIT_SIZE]
field_pos += 8 - ROW_KIND_BIT_SIZE
out_stream.write_byte(b)
for _ in range(1, self._leading_complete_bytes_num):
b = 0x00
for i in range(0, 8):
if value[field_pos + i] is None:
b |= null_byte_search_table[i]
field_pos += 8
out_stream.write_byte(b)
if self._leading_complete_bytes_num >= 1 and remaining_bits_num:
b = 0x00
for i in range(remaining_bits_num):
if value[field_pos + i] is None:
b |= null_byte_search_table[i]
out_stream.write_byte(b)
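# Worked example (illustrative, values derived from the tables in __init__): for
# row_kind_value == 2 and a row whose first field is None, the leading byte is
# row_kind_search_table[2] | null_byte_search_table[0 + ROW_KIND_BIT_SIZE]
# == 0x40 | 0x20 == 0x60; _read_mask below turns 0x60 back into the mask bits
# (False, True, True, ...), from which the decoder recovers row kind value 2 and
# marks the first field as None.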
def _read_mask(self, in_stream):
mask = []
mask_search_table = self.null_mask_search_table
remaining_bits_num = self._remaining_bits_num
for _ in range(self._leading_complete_bytes_num):
b = in_stream.read_byte()
mask.extend(mask_search_table[b])
if remaining_bits_num:
b = in_stream.read_byte()
mask.extend(mask_search_table[b][0:remaining_bits_num])
return mask
def __repr__(self):
return 'FlattenRowCoderImpl[%s]' % ', '.join(str(c) for c in self._field_coders)
class RowCoderImpl(FlattenRowCoderImpl):
def __init__(self, field_coders, field_names):
super(RowCoderImpl, self).__init__(field_coders)
self.field_names = field_names
def encode_to_stream(self, value: Row, out_stream, nested):
self._encode_one_row_to_stream(value, out_stream, nested)
def decode_from_stream(self, in_stream, nested):
row_kind_value, fields = self._decode_one_row_from_stream(in_stream, nested)
row = Row(*fields)
row.set_field_names(self.field_names)
row.set_row_kind(RowKind(row_kind_value))
return row
def __repr__(self):
return 'RowCoderImpl[%s]' % ', '.join(str(c) for c in self._field_coders)
class TableFunctionRowCoderImpl(StreamCoderImpl):
def __init__(self, flatten_row_coder):
self._flatten_row_coder = flatten_row_coder
self._field_count = flatten_row_coder._field_count
def encode_to_stream(self, iter_value, out_stream, nested):
is_row_or_tuple = False
if iter_value:
if isinstance(iter_value, (tuple, Row)):
iter_value = [iter_value]
is_row_or_tuple = True
for value in iter_value:
if self._field_count == 1 and not is_row_or_tuple:
value = (value,)
self._flatten_row_coder.encode_to_stream(value, out_stream, nested)
out_stream.write_var_int64(1)
out_stream.write_byte(0x00)
def decode_from_stream(self, in_stream, nested):
return self._flatten_row_coder.decode_from_stream(in_stream, nested)
def __repr__(self):
return 'TableFunctionRowCoderImpl[%s]' % repr(self._flatten_row_coder)
class AggregateFunctionRowCoderImpl(StreamCoderImpl):
"""
The aggregate function row coder impl is similar to the table function row coder
(one message may produce two or more messages, e.g. one INSERT message may produce one
UPDATE_BEFORE message and one UPDATE_AFTER message). The difference is that this row
coder will encode row kind information into the output row and there is no need to encode the
bytes which represent the end of output.
"""
def __init__(self, flatten_row_coder):
self._flatten_row_coder = flatten_row_coder
self._data_out_stream = create_OutputStream()
def encode_to_stream(self, iter_value, out_stream, nested):
data_out_stream = self._data_out_stream
for value in iter_value:
self._flatten_row_coder._encode_one_row_to_stream(value, data_out_stream, nested)
out_stream.write_var_int64(data_out_stream.size())
out_stream.write(data_out_stream.get())
data_out_stream._clear()
def decode_from_stream(self, in_stream, nested):
return [[item for item in self._flatten_row_coder.decode_from_stream(in_stream, nested)]]
def __repr__(self):
return 'AggregateFunctionRowCoderImpl[%s]' % repr(self._flatten_row_coder)
class BasicArrayCoderImpl(StreamCoderImpl):
def __init__(self, elem_coder):
self._elem_coder = elem_coder
def encode_to_stream(self, value, out_stream, nested):
out_stream.write_bigendian_int32(len(value))
for elem in value:
if elem is None:
out_stream.write_byte(False)
else:
out_stream.write_byte(True)
self._elem_coder.encode_to_stream(elem, out_stream, nested)
def decode_from_stream(self, in_stream, nested):
size = in_stream.read_bigendian_int32()
elements = [self._elem_coder.decode_from_stream(in_stream, nested)
if in_stream.read_byte() else None for _ in range(size)]
return elements
def __repr__(self):
return 'BasicArrayCoderImpl[%s]' % repr(self._elem_coder)
class PrimitiveArrayCoderImpl(StreamCoderImpl):
def __init__(self, elem_coder):
self._elem_coder = elem_coder
def encode_to_stream(self, value, out_stream, nested):
out_stream.write_bigendian_int32(len(value))
for elem in value:
self._elem_coder.encode_to_stream(elem, out_stream, nested)
def decode_from_stream(self, in_stream, nested):
size = in_stream.read_bigendian_int32()
elements = [self._elem_coder.decode_from_stream(in_stream, nested) for _ in range(size)]
return elements
def __repr__(self):
return 'PrimitiveArrayCoderImpl[%s]' % repr(self._elem_coder)
class PickledBytesCoderImpl(StreamCoderImpl):
def __init__(self):
self.field_coder = BinaryCoderImpl()
def encode_to_stream(self, value, out_stream, nested):
coded_data = pickle.dumps(value)
self.field_coder.encode_to_stream(coded_data, out_stream, nested)
def decode_from_stream(self, in_stream, nested):
return self._decode_one_value_from_stream(in_stream, nested)
def _decode_one_value_from_stream(self, in_stream: create_InputStream, nested):
real_data = self.field_coder.decode_from_stream(in_stream, nested)
value = pickle.loads(real_data)
return value
def __repr__(self) -> str:
return 'PickledBytesCoderImpl[%s]' % str(self.field_coder)
class DataStreamMapCoderImpl(StreamCoderImpl):
def __init__(self, field_coder):
self._field_coder = field_coder
self.data_out_stream = create_OutputStream()
def encode_to_stream(self, value, stream,
nested): # type: (Any, create_OutputStream, bool) -> None
data_out_stream = self.data_out_stream
self._field_coder.encode_to_stream(value, data_out_stream, nested)
stream.write_var_int64(data_out_stream.size())
stream.write(data_out_stream.get())
data_out_stream._clear()
def decode_from_stream(self, stream, nested): # type: (create_InputStream, bool) -> Any
while stream.size() > 0:
stream.read_var_int64()
yield self._field_coder.decode_from_stream(stream, nested)
def __repr__(self):
return 'DataStreamMapCoderImpl[%s]' % repr(self._field_coder)
class DataStreamFlatMapCoderImpl(StreamCoderImpl):
def __init__(self, field_coder):
self._field_coder = field_coder
def encode_to_stream(self, iter_value, stream,
nested): # type: (Any, create_OutputStream, bool) -> None
if iter_value:
for value in iter_value:
self._field_coder.encode_to_stream(value, stream, nested)
stream.write_var_int64(1)
stream.write_byte(0x00)
def decode_from_stream(self, stream, nested):
return self._field_coder.decode_from_stream(stream, nested)
def __str__(self) -> str:
return 'DataStreamFlatMapCoderImpl[%s]' % repr(self._field_coder)
class DataStreamCoFlatMapCoderImpl(StreamCoderImpl):
def __init__(self, field_coder):
self._field_coder = field_coder
def encode_to_stream(self, iter_value, stream,
nested): # type: (Any, create_OutputStream, bool) -> None
for value in iter_value:
self._field_coder.encode_to_stream(value, stream, nested)
def decode_from_stream(self, stream, nested):
return self._field_coder.decode_from_stream(stream, nested)
def __str__(self) -> str:
return 'DataStreamCoFlatMapCoderImpl[%s]' % repr(self._field_coder)
class MapCoderImpl(StreamCoderImpl):
def __init__(self, key_coder, value_coder):
self._key_coder = key_coder
self._value_coder = value_coder
def encode_to_stream(self, map_value, out_stream, nested):
out_stream.write_bigendian_int32(len(map_value))
for key in map_value:
self._key_coder.encode_to_stream(key, out_stream, nested)
value = map_value[key]
if value is None:
out_stream.write_byte(True)
else:
out_stream.write_byte(False)
self._value_coder.encode_to_stream(map_value[key], out_stream, nested)
def decode_from_stream(self, in_stream, nested):
size = in_stream.read_bigendian_int32()
map_value = {}
for _ in range(size):
key = self._key_coder.decode_from_stream(in_stream, nested)
is_null = in_stream.read_byte()
if is_null:
map_value[key] = None
else:
value = self._value_coder.decode_from_stream(in_stream, nested)
map_value[key] = value
return map_value
def __repr__(self):
return 'MapCoderImpl[%s]' % ' : '.join([repr(self._key_coder), repr(self._value_coder)])
class BigIntCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, out_stream, nested):
out_stream.write_bigendian_int64(value)
def decode_from_stream(self, in_stream, nested):
return in_stream.read_bigendian_int64()
class TinyIntCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, out_stream, nested):
out_stream.write(struct.pack('b', value))
def decode_from_stream(self, in_stream, nested):
return struct.unpack('b', in_stream.read(1))[0]
class SmallIntCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, out_stream, nested):
out_stream.write(struct.pack('>h', value))
def decode_from_stream(self, in_stream, nested):
return struct.unpack('>h', in_stream.read(2))[0]
class IntCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, out_stream, nested):
out_stream.write_bigendian_int32(value)
def decode_from_stream(self, in_stream, nested):
return in_stream.read_bigendian_int32()
class BooleanCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, out_stream, nested):
out_stream.write_byte(value)
def decode_from_stream(self, in_stream, nested):
return bool(in_stream.read_byte())
class FloatCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, out_stream, nested):
out_stream.write(struct.pack('>f', value))
def decode_from_stream(self, in_stream, nested):
return struct.unpack('>f', in_stream.read(4))[0]
class DoubleCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, out_stream, nested):
out_stream.write_bigendian_double(value)
def decode_from_stream(self, in_stream, nested):
return in_stream.read_bigendian_double()
class DecimalCoderImpl(StreamCoderImpl):
def __init__(self, precision, scale):
self.context = decimal.Context(prec=precision)
self.scale_format = decimal.Decimal(10) ** -scale
def encode_to_stream(self, value, out_stream, nested):
user_context = decimal.getcontext()
decimal.setcontext(self.context)
value = value.quantize(self.scale_format)
bytes_value = str(value).encode("utf-8")
out_stream.write_bigendian_int32(len(bytes_value))
out_stream.write(bytes_value, False)
decimal.setcontext(user_context)
def decode_from_stream(self, in_stream, nested):
user_context = decimal.getcontext()
decimal.setcontext(self.context)
size = in_stream.read_bigendian_int32()
value = decimal.Decimal(in_stream.read(size).decode("utf-8")).quantize(self.scale_format)
decimal.setcontext(user_context)
return value
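# Worked example (illustrative only, assuming precision=5 and scale=2): encoding
# decimal.Decimal('3.14159') first quantizes it to Decimal('3.14'), then writes the
# big-endian length 4 followed by the UTF-8 bytes b'3.14'; decoding parses those bytes
# back and quantizes again, returning Decimal('3.14').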
class BigDecimalCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, stream, nested):
bytes_value = str(value).encode("utf-8")
stream.write_bigendian_int32(len(bytes_value))
stream.write(bytes_value, False)
def decode_from_stream(self, stream, nested):
size = stream.read_bigendian_int32()
value = decimal.Decimal(stream.read(size).decode("utf-8"))
return value
class TupleCoderImpl(StreamCoderImpl):
def __init__(self, field_coders):
self._field_coders = field_coders
self._field_count = len(field_coders)
def encode_to_stream(self, value, out_stream, nested):
field_coders = self._field_coders
for i in range(self._field_count):
field_coders[i].encode_to_stream(value[i], out_stream, nested)
def decode_from_stream(self, stream, nested):
decoded_list = [field_coder.decode_from_stream(stream, nested)
for field_coder in self._field_coders]
return (*decoded_list,)
def __repr__(self) -> str:
return 'TupleCoderImpl[%s]' % ', '.join(str(c) for c in self._field_coders)
class BinaryCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, out_stream, nested):
out_stream.write_bigendian_int32(len(value))
out_stream.write(value, False)
def decode_from_stream(self, in_stream, nested):
size = in_stream.read_bigendian_int32()
return in_stream.read(size)
class CharCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, out_stream, nested):
bytes_value = value.encode("utf-8")
out_stream.write_bigendian_int32(len(bytes_value))
out_stream.write(bytes_value, False)
def decode_from_stream(self, in_stream, nested):
size = in_stream.read_bigendian_int32()
return in_stream.read(size).decode("utf-8")
class DateCoderImpl(StreamCoderImpl):
EPOCH_ORDINAL = datetime.datetime(1970, 1, 1).toordinal()
def encode_to_stream(self, value, out_stream, nested):
out_stream.write_bigendian_int32(self.date_to_internal(value))
def decode_from_stream(self, in_stream, nested):
value = in_stream.read_bigendian_int32()
return self.internal_to_date(value)
def date_to_internal(self, d):
return d.toordinal() - self.EPOCH_ORDINAL
def internal_to_date(self, v):
return datetime.date.fromordinal(v + self.EPOCH_ORDINAL)
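# Example: date_to_internal(datetime.date(1970, 1, 2)) == 1, i.e. dates are encoded as
# the number of days since the Unix epoch (1970-01-01 maps to 0).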
class TimeCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, out_stream, nested):
out_stream.write_bigendian_int32(self.time_to_internal(value))
def decode_from_stream(self, in_stream, nested):
value = in_stream.read_bigendian_int32()
return self.internal_to_time(value)
def time_to_internal(self, t):
milliseconds = (t.hour * 3600000
+ t.minute * 60000
+ t.second * 1000
+ t.microsecond // 1000)
return milliseconds
def internal_to_time(self, v):
seconds, milliseconds = divmod(v, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return datetime.time(hours, minutes, seconds, milliseconds * 1000)
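# Example round trip: time_to_internal(datetime.time(1, 2, 3, 4000)) == 3723004
# (milliseconds since midnight), and internal_to_time(3723004) restores
# datetime.time(1, 2, 3, 4000); any precision below one millisecond is truncated.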
class TimestampCoderImpl(StreamCoderImpl):
def __init__(self, precision):
self.precision = precision
def is_compact(self):
return self.precision <= 3
def encode_to_stream(self, value, out_stream, nested):
milliseconds, nanoseconds = self.timestamp_to_internal(value)
if self.is_compact():
assert nanoseconds == 0
out_stream.write_bigendian_int64(milliseconds)
else:
out_stream.write_bigendian_int64(milliseconds)
out_stream.write_bigendian_int32(nanoseconds)
def decode_from_stream(self, in_stream, nested):
if self.is_compact():
milliseconds = in_stream.read_bigendian_int64()
nanoseconds = 0
else:
milliseconds = in_stream.read_bigendian_int64()
nanoseconds = in_stream.read_bigendian_int32()
return self.internal_to_timestamp(milliseconds, nanoseconds)
def timestamp_to_internal(self, timestamp):
seconds = int(timestamp.replace(tzinfo=datetime.timezone.utc).timestamp())
microseconds_of_second = timestamp.microsecond
milliseconds = seconds * 1000 + microseconds_of_second // 1000
nanoseconds = microseconds_of_second % 1000 * 1000
return milliseconds, nanoseconds
def internal_to_timestamp(self, milliseconds, nanoseconds):
second, microsecond = (milliseconds // 1000,
milliseconds % 1000 * 1000 + nanoseconds // 1000)
return datetime.datetime.utcfromtimestamp(second).replace(microsecond=microsecond)
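# Example (compact form, precision <= 3): datetime.datetime(1970, 1, 1, 0, 0, 1) maps to
# (milliseconds=1000, nanoseconds=0) and is written as a single big-endian int64; for
# precision > 3 the remaining nanoseconds are written as an extra int32.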
class LocalZonedTimestampCoderImpl(TimestampCoderImpl):
def __init__(self, precision, timezone):
super(LocalZonedTimestampCoderImpl, self).__init__(precision)
self.timezone = timezone
def internal_to_timestamp(self, milliseconds, nanoseconds):
return self.timezone.localize(
super(LocalZonedTimestampCoderImpl, self).internal_to_timestamp(
milliseconds, nanoseconds))
class ArrowCoderImpl(StreamCoderImpl):
def __init__(self, schema, row_type, timezone):
self._schema = schema
self._field_types = row_type.field_types()
self._timezone = timezone
self._resettable_io = ResettableIO()
self._batch_reader = ArrowCoderImpl._load_from_stream(self._resettable_io)
self._batch_writer = pa.RecordBatchStreamWriter(self._resettable_io, self._schema)
self.data_out_stream = create_OutputStream()
self._resettable_io.set_output_stream(self.data_out_stream)
def encode_to_stream(self, cols, out_stream, nested):
data_out_stream = self.data_out_stream
self._batch_writer.write_batch(
pandas_to_arrow(self._schema, self._timezone, self._field_types, cols))
out_stream.write_var_int64(data_out_stream.size())
out_stream.write(data_out_stream.get())
data_out_stream._clear()
def decode_from_stream(self, in_stream, nested):
while in_stream.size() > 0:
yield self._decode_one_batch_from_stream(in_stream, in_stream.read_var_int64())
@staticmethod
def _load_from_stream(stream):
reader = pa.ipc.open_stream(stream)
for batch in reader:
yield batch
def _decode_one_batch_from_stream(self, in_stream: create_InputStream, size: int) -> List:
self._resettable_io.set_input_bytes(in_stream.read(size))
# there is only one arrow batch in the underlying input stream
return arrow_to_pandas(self._timezone, self._field_types, [next(self._batch_reader)])
def __repr__(self):
return 'ArrowCoderImpl[%s]' % self._schema
class OverWindowArrowCoderImpl(StreamCoderImpl):
def __init__(self, arrow_coder):
self._arrow_coder = arrow_coder
self._int_coder = IntCoderImpl()
def encode_to_stream(self, value, stream, nested):
self._arrow_coder.encode_to_stream(value, stream, nested)
def decode_from_stream(self, in_stream, nested):
while in_stream.size():
remaining_size = in_stream.read_var_int64()
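# remaining_size is the length prefix of this chunk; every 4-byte int consumed below
# (the window count, each window size and each boundary value) is subtracted from it,
# so what is left is exactly the size of the trailing arrow batch handed to
# _decode_one_batch_from_stream.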
window_num = self._int_coder.decode_from_stream(in_stream, nested)
remaining_size -= 4
window_boundaries_and_arrow_data = []
for _ in range(window_num):
window_size = self._int_coder.decode_from_stream(in_stream, nested)
remaining_size -= 4
window_boundaries_and_arrow_data.append(
[self._int_coder.decode_from_stream(in_stream, nested)
for _ in range(window_size)])
remaining_size -= 4 * window_size
window_boundaries_and_arrow_data.append(
self._arrow_coder._decode_one_batch_from_stream(in_stream, remaining_size))
yield window_boundaries_and_arrow_data
def __repr__(self):
return 'OverWindowArrowCoderImpl[%s]' % self._arrow_coder
class PassThroughLengthPrefixCoderImpl(StreamCoderImpl):
def __init__(self, value_coder):
self._value_coder = value_coder
def encode_to_stream(self, value, out: create_OutputStream, nested: bool) -> Any:
self._value_coder.encode_to_stream(value, out, nested)
def decode_from_stream(self, in_stream: create_InputStream, nested: bool) -> Any:
return self._value_coder.decode_from_stream(in_stream, nested)
def get_estimated_size_and_observables(self, value: Any, nested=False):
return 0, []
def __repr__(self):
return 'PassThroughLengthPrefixCoderImpl[%s]' % self._value_coder
| apache-2.0 |
petosegan/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
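# calling .toarray() works here because this helper is only used for the SGDClassifier
# configuration below, whose postfit_hook (x.sparsify()) converts coef_ into a scipy
# sparse matrix before the complexity is computed.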
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
GuessWhoSamFoo/pandas | pandas/tests/sparse/frame/test_frame.py | 1 | 53954 | # pylint: disable-msg=E1101,W0612
import operator
import numpy as np
from numpy import nan
import pytest
from pandas._libs.sparse import BlockIndex, IntIndex
from pandas.compat import lrange
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Panel, Series, bdate_range, compat
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.sparse import frame as spf
from pandas.core.sparse.api import (
SparseArray, SparseDataFrame, SparseDtype, SparseSeries)
from pandas.tests.frame.test_api import SharedWithSparse
from pandas.util import testing as tm
from pandas.tseries.offsets import BDay
class TestSparseDataFrame(SharedWithSparse):
klass = SparseDataFrame
# SharedWithSparse tests use generic, klass-agnostic assertion
_assert_frame_equal = staticmethod(tm.assert_sp_frame_equal)
_assert_series_equal = staticmethod(tm.assert_sp_series_equal)
def test_iterrows(self, float_frame, float_string_frame):
# Same as parent, but we don't ensure the sparse kind is the same.
for k, v in float_frame.iterrows():
exp = float_frame.loc[k]
tm.assert_sp_series_equal(v, exp, check_kind=False)
for k, v in float_string_frame.iterrows():
exp = float_string_frame.loc[k]
tm.assert_sp_series_equal(v, exp, check_kind=False)
def test_itertuples(self, float_frame):
for i, tup in enumerate(float_frame.itertuples()):
s = self.klass._constructor_sliced(tup[1:])
s.name = tup[0]
expected = float_frame.iloc[i, :].reset_index(drop=True)
tm.assert_sp_series_equal(s, expected, check_kind=False)
def test_fill_value_when_combine_const(self):
# GH12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype='float')
df = SparseDataFrame({'foo': dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_sp_frame_equal(res, exp)
def test_values(self, empty_frame, float_frame):
empty = empty_frame.values
assert empty.shape == (0, 0)
no_cols = SparseDataFrame(index=np.arange(10))
mat = no_cols.values
assert mat.shape == (10, 0)
no_index = SparseDataFrame(columns=np.arange(10))
mat = no_index.values
assert mat.shape == (0, 10)
def test_copy(self, float_frame):
cp = float_frame.copy()
assert isinstance(cp, SparseDataFrame)
tm.assert_sp_frame_equal(cp, float_frame)
# as of v0.15.0
# this is now identical (but not is_a )
assert cp.index.identical(float_frame.index)
def test_constructor(self, float_frame, float_frame_int_kind,
float_frame_fill0):
for col, series in compat.iteritems(float_frame):
assert isinstance(series, SparseSeries)
assert isinstance(float_frame_int_kind['A'].sp_index, IntIndex)
# constructed zframe from matrix above
assert float_frame_fill0['A'].fill_value == 0
# XXX: changed asarray
expected = pd.SparseArray([0, 0, 0, 0, 1., 2., 3., 4., 5., 6.],
fill_value=0, kind='block')
tm.assert_sp_array_equal(expected,
float_frame_fill0['A'].values)
tm.assert_numpy_array_equal(np.array([0., 0., 0., 0., 1., 2.,
3., 4., 5., 6.]),
float_frame_fill0['A'].to_dense().values)
# construct no data
sdf = SparseDataFrame(columns=np.arange(10), index=np.arange(10))
for col, series in compat.iteritems(sdf):
assert isinstance(series, SparseSeries)
# construct from nested dict
data = {c: s.to_dict() for c, s in compat.iteritems(float_frame)}
sdf = SparseDataFrame(data)
tm.assert_sp_frame_equal(sdf, float_frame)
# TODO: test data is copied from inputs
# init dict with different index
idx = float_frame.index[:5]
cons = SparseDataFrame(
float_frame, index=idx, columns=float_frame.columns,
default_fill_value=float_frame.default_fill_value,
default_kind=float_frame.default_kind, copy=True)
reindexed = float_frame.reindex(idx)
tm.assert_sp_frame_equal(cons, reindexed, exact_indices=False)
# assert level parameter breaks reindex
with pytest.raises(TypeError):
float_frame.reindex(idx, level=0)
repr(float_frame)
def test_constructor_dict_order(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6, else
# order by value
d = {'b': [2, 3], 'a': [0, 1]}
frame = SparseDataFrame(data=d)
if compat.PY36:
expected = SparseDataFrame(data=d, columns=list('ba'))
else:
expected = SparseDataFrame(data=d, columns=list('ab'))
tm.assert_sp_frame_equal(frame, expected)
def test_constructor_ndarray(self, float_frame):
# no index or columns
sp = SparseDataFrame(float_frame.values)
# 1d
sp = SparseDataFrame(float_frame['A'].values, index=float_frame.index,
columns=['A'])
tm.assert_sp_frame_equal(sp, float_frame.reindex(columns=['A']))
# raise on level argument
pytest.raises(TypeError, float_frame.reindex, columns=['A'],
level=1)
# wrong length index / columns
with pytest.raises(ValueError, match="^Index length"):
SparseDataFrame(float_frame.values, index=float_frame.index[:-1])
with pytest.raises(ValueError, match="^Column length"):
SparseDataFrame(float_frame.values,
columns=float_frame.columns[:-1])
# GH 9272
def test_constructor_empty(self):
sp = SparseDataFrame()
assert len(sp.index) == 0
assert len(sp.columns) == 0
def test_constructor_dataframe(self, float_frame):
dense = float_frame.to_dense()
sp = SparseDataFrame(dense)
tm.assert_sp_frame_equal(sp, float_frame)
def test_constructor_convert_index_once(self):
arr = np.array([1.5, 2.5, 3.5])
sdf = SparseDataFrame(columns=lrange(4), index=arr)
assert sdf[0].index is sdf[1].index
def test_constructor_from_series(self):
# GH 2873
x = Series(np.random.randn(10000), name='a')
x = x.to_sparse(fill_value=0)
assert isinstance(x, SparseSeries)
df = SparseDataFrame(x)
assert isinstance(df, SparseDataFrame)
x = Series(np.random.randn(10000), name='a')
y = Series(np.random.randn(10000), name='b')
x2 = x.astype(float)
x2.loc[:9998] = np.NaN
# TODO: x_sparse is unused...fix
x_sparse = x2.to_sparse(fill_value=np.NaN) # noqa
# Currently fails too with weird ufunc error
# df1 = SparseDataFrame([x_sparse, y])
y.loc[:9998] = 0
# TODO: y_sparse is unused...fix
y_sparse = y.to_sparse(fill_value=0) # noqa
# without sparse value raises error
# df2 = SparseDataFrame([x2_sparse, y])
def test_constructor_from_dense_series(self):
# GH 19393
# series with name
x = Series(np.random.randn(10000), name='a')
result = SparseDataFrame(x)
expected = x.to_frame().to_sparse()
tm.assert_sp_frame_equal(result, expected)
# series with no name
x = Series(np.random.randn(10000))
result = SparseDataFrame(x)
expected = x.to_frame().to_sparse()
tm.assert_sp_frame_equal(result, expected)
def test_constructor_from_unknown_type(self):
# GH 19393
class Unknown(object):
pass
with pytest.raises(TypeError,
match=('SparseDataFrame called with unknown type '
'"Unknown" for data argument')):
SparseDataFrame(Unknown())
def test_constructor_preserve_attr(self):
# GH 13866
arr = pd.SparseArray([1, 0, 3, 0], dtype=np.int64, fill_value=0)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
df = pd.SparseDataFrame({'x': arr})
assert df['x'].dtype == SparseDtype(np.int64)
assert df['x'].fill_value == 0
s = pd.SparseSeries(arr, name='x')
assert s.dtype == SparseDtype(np.int64)
assert s.fill_value == 0
df = pd.SparseDataFrame(s)
assert df['x'].dtype == SparseDtype(np.int64)
assert df['x'].fill_value == 0
df = pd.SparseDataFrame({'x': s})
assert df['x'].dtype == SparseDtype(np.int64)
assert df['x'].fill_value == 0
def test_constructor_nan_dataframe(self):
# GH 10079
trains = np.arange(100)
thresholds = [10, 20, 30, 40, 50, 60]
tuples = [(i, j) for i in trains for j in thresholds]
index = pd.MultiIndex.from_tuples(tuples,
names=['trains', 'thresholds'])
matrix = np.empty((len(index), len(trains)))
matrix.fill(np.nan)
df = pd.DataFrame(matrix, index=index, columns=trains, dtype=float)
result = df.to_sparse()
expected = pd.SparseDataFrame(matrix, index=index, columns=trains,
dtype=float)
tm.assert_sp_frame_equal(result, expected)
def test_type_coercion_at_construction(self):
# GH 15682
result = pd.SparseDataFrame(
{'a': [1, 0, 0], 'b': [0, 1, 0], 'c': [0, 0, 1]}, dtype='uint8',
default_fill_value=0)
expected = pd.SparseDataFrame(
{'a': pd.SparseSeries([1, 0, 0], dtype='uint8'),
'b': pd.SparseSeries([0, 1, 0], dtype='uint8'),
'c': pd.SparseSeries([0, 0, 1], dtype='uint8')},
default_fill_value=0)
tm.assert_sp_frame_equal(result, expected)
def test_dtypes(self):
df = DataFrame(np.random.randn(10000, 4))
df.loc[:9998] = np.nan
sdf = df.to_sparse()
result = sdf.get_dtype_counts()
expected = Series({'Sparse[float64, nan]': 4})
tm.assert_series_equal(result, expected)
def test_shape(self, float_frame, float_frame_int_kind,
float_frame_fill0, float_frame_fill2):
# see gh-10452
assert float_frame.shape == (10, 4)
assert float_frame_int_kind.shape == (10, 4)
assert float_frame_fill0.shape == (10, 4)
assert float_frame_fill2.shape == (10, 4)
def test_str(self):
df = DataFrame(np.random.randn(10000, 4))
df.loc[:9998] = np.nan
sdf = df.to_sparse()
str(sdf)
def test_array_interface(self, float_frame):
res = np.sqrt(float_frame)
dres = np.sqrt(float_frame.to_dense())
tm.assert_frame_equal(res.to_dense(), dres)
def test_pickle(self, float_frame, float_frame_int_kind, float_frame_dense,
float_frame_fill0, float_frame_fill0_dense,
float_frame_fill2, float_frame_fill2_dense):
def _test_roundtrip(frame, orig):
result = tm.round_trip_pickle(frame)
tm.assert_sp_frame_equal(frame, result)
tm.assert_frame_equal(result.to_dense(), orig, check_dtype=False)
_test_roundtrip(SparseDataFrame(), DataFrame())
_test_roundtrip(float_frame, float_frame_dense)
_test_roundtrip(float_frame_int_kind, float_frame_dense)
_test_roundtrip(float_frame_fill0, float_frame_fill0_dense)
_test_roundtrip(float_frame_fill2, float_frame_fill2_dense)
def test_dense_to_sparse(self):
df = DataFrame({'A': [nan, nan, nan, 1, 2],
'B': [1, 2, nan, nan, nan]})
sdf = df.to_sparse()
assert isinstance(sdf, SparseDataFrame)
assert np.isnan(sdf.default_fill_value)
assert isinstance(sdf['A'].sp_index, BlockIndex)
tm.assert_frame_equal(sdf.to_dense(), df)
sdf = df.to_sparse(kind='integer')
assert isinstance(sdf['A'].sp_index, IntIndex)
df = DataFrame({'A': [0, 0, 0, 1, 2],
'B': [1, 2, 0, 0, 0]}, dtype=float)
sdf = df.to_sparse(fill_value=0)
assert sdf.default_fill_value == 0
tm.assert_frame_equal(sdf.to_dense(), df)
def test_density(self):
df = SparseSeries([nan, nan, nan, 0, 1, 2, 3, 4, 5, 6])
assert df.density == 0.7
df = SparseDataFrame({'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
'C': np.arange(10),
'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]})
assert df.density == 0.75
def test_sparse_to_dense(self):
pass
def test_sparse_series_ops(self, float_frame):
self._check_frame_ops(float_frame)
def test_sparse_series_ops_i(self, float_frame_int_kind):
self._check_frame_ops(float_frame_int_kind)
def test_sparse_series_ops_z(self, float_frame_fill0):
self._check_frame_ops(float_frame_fill0)
def test_sparse_series_ops_fill(self, float_frame_fill2):
self._check_frame_ops(float_frame_fill2)
def _check_frame_ops(self, frame):
def _compare_to_dense(a, b, da, db, op):
sparse_result = op(a, b)
dense_result = op(da, db)
fill = sparse_result.default_fill_value
dense_result = dense_result.to_sparse(fill_value=fill)
tm.assert_sp_frame_equal(sparse_result, dense_result,
exact_indices=False)
if isinstance(a, DataFrame) and isinstance(db, DataFrame):
mixed_result = op(a, db)
assert isinstance(mixed_result, SparseDataFrame)
tm.assert_sp_frame_equal(mixed_result, sparse_result,
exact_indices=False)
opnames = ['add', 'sub', 'mul', 'truediv', 'floordiv']
ops = [getattr(operator, name) for name in opnames]
fidx = frame.index
# time series operations
series = [frame['A'], frame['B'], frame['C'], frame['D'],
frame['A'].reindex(fidx[:7]), frame['A'].reindex(fidx[::2]),
SparseSeries(
[], index=[])]
for op in opnames:
_compare_to_dense(frame, frame[::2], frame.to_dense(),
frame[::2].to_dense(), getattr(operator, op))
# 2304, no auto-broadcasting
for i, s in enumerate(series):
f = lambda a, b: getattr(a, op)(b, axis='index')
_compare_to_dense(frame, s, frame.to_dense(), s.to_dense(), f)
# rops are not implemented
# _compare_to_dense(s, frame, s.to_dense(),
# frame.to_dense(), f)
# cross-sectional operations
series = [frame.xs(fidx[0]), frame.xs(fidx[3]), frame.xs(fidx[5]),
frame.xs(fidx[7]), frame.xs(fidx[5])[:2]]
for op in ops:
for s in series:
_compare_to_dense(frame, s, frame.to_dense(), s, op)
_compare_to_dense(s, frame, s, frame.to_dense(), op)
# it works!
result = frame + frame.loc[:, ['A', 'B']] # noqa
def test_op_corners(self, float_frame, empty_frame):
empty = empty_frame + empty_frame
assert empty.empty
foo = float_frame + empty_frame
assert isinstance(foo.index, DatetimeIndex)
tm.assert_frame_equal(foo, float_frame * np.nan)
foo = empty_frame + float_frame
tm.assert_frame_equal(foo, float_frame * np.nan)
def test_scalar_ops(self):
pass
def test_getitem(self):
# 1585 select multiple columns
sdf = SparseDataFrame(index=[0, 1, 2], columns=['a', 'b', 'c'])
result = sdf[['a', 'b']]
exp = sdf.reindex(columns=['a', 'b'])
tm.assert_sp_frame_equal(result, exp)
pytest.raises(Exception, sdf.__getitem__, ['a', 'd'])
def test_iloc(self, float_frame):
# GH 2227
result = float_frame.iloc[:, 0]
assert isinstance(result, SparseSeries)
tm.assert_sp_series_equal(result, float_frame['A'])
# preserve sparse index type. #2251
data = {'A': [0, 1]}
iframe = SparseDataFrame(data, default_kind='integer')
tm.assert_class_equal(iframe['A'].sp_index,
iframe.iloc[:, 0].sp_index)
def test_set_value(self, float_frame):
# ok, as the index gets converted to object
frame = float_frame.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res = frame.set_value('foobar', 'B', 1.5)
assert res.index.dtype == 'object'
res = float_frame
res.index = res.index.astype(object)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res = float_frame.set_value('foobar', 'B', 1.5)
assert res is not float_frame
assert res.index[-1] == 'foobar'
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert res.get_value('foobar', 'B') == 1.5
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res2 = res.set_value('foobar', 'qux', 1.5)
assert res2 is not res
tm.assert_index_equal(res2.columns,
pd.Index(list(float_frame.columns) + ['qux']))
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert res2.get_value('foobar', 'qux') == 1.5
def test_fancy_index_misc(self, float_frame):
# axis = 0
sliced = float_frame.iloc[-2:, :]
expected = float_frame.reindex(index=float_frame.index[-2:])
tm.assert_sp_frame_equal(sliced, expected)
# axis = 1
sliced = float_frame.iloc[:, -2:]
expected = float_frame.reindex(columns=float_frame.columns[-2:])
tm.assert_sp_frame_equal(sliced, expected)
def test_getitem_overload(self, float_frame):
# slicing
sl = float_frame[:20]
tm.assert_sp_frame_equal(sl,
float_frame.reindex(float_frame.index[:20]))
# boolean indexing
d = float_frame.index[5]
indexer = float_frame.index > d
subindex = float_frame.index[indexer]
subframe = float_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
pytest.raises(Exception, float_frame.__getitem__, indexer[:-1])
def test_setitem(self, float_frame, float_frame_int_kind,
float_frame_dense,
float_frame_fill0, float_frame_fill0_dense,
float_frame_fill2, float_frame_fill2_dense):
def _check_frame(frame, orig):
N = len(frame)
# insert SparseSeries
frame['E'] = frame['A']
assert isinstance(frame['E'], SparseSeries)
tm.assert_sp_series_equal(frame['E'], frame['A'],
check_names=False)
# insert SparseSeries differently-indexed
to_insert = frame['A'][::2]
frame['E'] = to_insert
expected = to_insert.to_dense().reindex(frame.index)
result = frame['E'].to_dense()
tm.assert_series_equal(result, expected, check_names=False)
assert result.name == 'E'
# insert Series
frame['F'] = frame['A'].to_dense()
assert isinstance(frame['F'], SparseSeries)
tm.assert_sp_series_equal(frame['F'], frame['A'],
check_names=False)
# insert Series differently-indexed
to_insert = frame['A'].to_dense()[::2]
frame['G'] = to_insert
expected = to_insert.reindex(frame.index)
expected.name = 'G'
tm.assert_series_equal(frame['G'].to_dense(), expected)
# insert ndarray
frame['H'] = np.random.randn(N)
assert isinstance(frame['H'], SparseSeries)
to_sparsify = np.random.randn(N)
to_sparsify[N // 2:] = frame.default_fill_value
frame['I'] = to_sparsify
assert len(frame['I'].sp_values) == N // 2
# insert ndarray wrong size
pytest.raises(Exception, frame.__setitem__, 'foo',
np.random.randn(N - 1))
# scalar value
frame['J'] = 5
assert len(frame['J'].sp_values) == N
assert (frame['J'].sp_values == 5).all()
frame['K'] = frame.default_fill_value
assert len(frame['K'].sp_values) == 0
_check_frame(float_frame, float_frame_dense)
_check_frame(float_frame_int_kind, float_frame_dense)
_check_frame(float_frame_fill0, float_frame_fill0_dense)
_check_frame(float_frame_fill2, float_frame_fill2_dense)
@pytest.mark.parametrize('values', [
[True, False],
[0, 1],
[1, None],
['a', 'b'],
[pd.Timestamp('2017'), pd.NaT],
[pd.Timedelta('10s'), pd.NaT],
])
def test_setitem_more(self, values):
df = pd.DataFrame({"A": values})
df['A'] = pd.SparseArray(values)
expected = pd.DataFrame({'A': pd.SparseArray(values)})
tm.assert_frame_equal(df, expected)
def test_setitem_corner(self, float_frame):
float_frame['a'] = float_frame['B']
tm.assert_sp_series_equal(float_frame['a'], float_frame['B'],
check_names=False)
def test_setitem_array(self, float_frame):
arr = float_frame['B']
float_frame['E'] = arr
tm.assert_sp_series_equal(float_frame['E'], float_frame['B'],
check_names=False)
float_frame['F'] = arr[:-1]
index = float_frame.index[:-1]
tm.assert_sp_series_equal(float_frame['E'].reindex(index),
float_frame['F'].reindex(index),
check_names=False)
def test_setitem_chained_no_consolidate(self):
# https://github.com/pandas-dev/pandas/pull/19268
# issuecomment-361696418
# chained setitem used to cause consolidation
sdf = pd.SparseDataFrame([[np.nan, 1], [2, np.nan]])
with pd.option_context('mode.chained_assignment', None):
sdf[0][1] = 2
assert len(sdf._data.blocks) == 2
def test_delitem(self, float_frame):
A = float_frame['A']
C = float_frame['C']
del float_frame['B']
assert 'B' not in float_frame
tm.assert_sp_series_equal(float_frame['A'], A)
tm.assert_sp_series_equal(float_frame['C'], C)
del float_frame['D']
assert 'D' not in float_frame
del float_frame['A']
assert 'A' not in float_frame
def test_set_columns(self, float_frame):
float_frame.columns = float_frame.columns
pytest.raises(Exception, setattr, float_frame, 'columns',
float_frame.columns[:-1])
def test_set_index(self, float_frame):
float_frame.index = float_frame.index
pytest.raises(Exception, setattr, float_frame, 'index',
float_frame.index[:-1])
def test_ctor_reindex(self):
idx = pd.Index([0, 1, 2, 3])
with pytest.raises(ValueError, match=''):
pd.SparseDataFrame({"A": [1, 2]}, index=idx)
def test_append(self, float_frame):
a = float_frame[:5]
b = float_frame[5:]
appended = a.append(b)
tm.assert_sp_frame_equal(appended, float_frame, exact_indices=False)
a = float_frame.iloc[:5, :3]
b = float_frame.iloc[5:]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Stacklevel is set for pd.concat, not append
appended = a.append(b)
tm.assert_sp_frame_equal(appended.iloc[:, :3], float_frame.iloc[:, :3],
exact_indices=False)
a = a[['B', 'C', 'A']].head(2)
b = b.head(2)
expected = pd.SparseDataFrame({
"B": [0., 1, None, 3],
"C": [0., 1, 5, 6],
"A": [None, None, 2, 3],
"D": [None, None, 5, None],
}, index=a.index | b.index, columns=['B', 'C', 'A', 'D'])
with tm.assert_produces_warning(None):
appended = a.append(b, sort=False)
tm.assert_frame_equal(appended, expected)
with tm.assert_produces_warning(None):
appended = a.append(b, sort=True)
tm.assert_sp_frame_equal(appended, expected[['A', 'B', 'C', 'D']],
consolidate_block_indices=True,
check_kind=False)
def test_astype(self):
sparse = pd.SparseDataFrame({'A': SparseArray([1, 2, 3, 4],
dtype=np.int64),
'B': SparseArray([4, 5, 6, 7],
dtype=np.int64)})
assert sparse['A'].dtype == SparseDtype(np.int64)
assert sparse['B'].dtype == SparseDtype(np.int64)
# retain fill_value
res = sparse.astype(np.float64)
exp = pd.SparseDataFrame({'A': SparseArray([1., 2., 3., 4.],
fill_value=0,
kind='integer'),
'B': SparseArray([4., 5., 6., 7.],
fill_value=0,
kind='integer')},
default_fill_value=np.nan)
tm.assert_sp_frame_equal(res, exp)
assert res['A'].dtype == SparseDtype(np.float64, 0)
assert res['B'].dtype == SparseDtype(np.float64, 0)
# update fill_value
res = sparse.astype(SparseDtype(np.float64, np.nan))
exp = pd.SparseDataFrame({'A': SparseArray([1., 2., 3., 4.],
fill_value=np.nan,
kind='integer'),
'B': SparseArray([4., 5., 6., 7.],
fill_value=np.nan,
kind='integer')},
default_fill_value=np.nan)
tm.assert_sp_frame_equal(res, exp)
assert res['A'].dtype == SparseDtype(np.float64, np.nan)
assert res['B'].dtype == SparseDtype(np.float64, np.nan)
def test_astype_bool(self):
sparse = pd.SparseDataFrame({'A': SparseArray([0, 2, 0, 4],
fill_value=0,
dtype=np.int64),
'B': SparseArray([0, 5, 0, 7],
fill_value=0,
dtype=np.int64)},
default_fill_value=0)
assert sparse['A'].dtype == SparseDtype(np.int64)
assert sparse['B'].dtype == SparseDtype(np.int64)
res = sparse.astype(SparseDtype(bool, False))
exp = pd.SparseDataFrame({'A': SparseArray([False, True, False, True],
dtype=np.bool,
fill_value=False,
kind='integer'),
'B': SparseArray([False, True, False, True],
dtype=np.bool,
fill_value=False,
kind='integer')},
default_fill_value=False)
tm.assert_sp_frame_equal(res, exp)
assert res['A'].dtype == SparseDtype(np.bool)
assert res['B'].dtype == SparseDtype(np.bool)
def test_astype_object(self):
# This may change in GH-23125
df = pd.DataFrame({"A": SparseArray([0, 1]),
"B": SparseArray([0, 1])})
result = df.astype(object)
dtype = SparseDtype(object, 0)
expected = pd.DataFrame({"A": SparseArray([0, 1], dtype=dtype),
"B": SparseArray([0, 1], dtype=dtype)})
tm.assert_frame_equal(result, expected)
def test_fillna(self, float_frame_fill0, float_frame_fill0_dense):
df = float_frame_fill0.reindex(lrange(5))
dense = float_frame_fill0_dense.reindex(lrange(5))
result = df.fillna(0)
expected = dense.fillna(0)
tm.assert_sp_frame_equal(result, expected.to_sparse(fill_value=0),
exact_indices=False)
tm.assert_frame_equal(result.to_dense(), expected)
result = df.copy()
result.fillna(0, inplace=True)
expected = dense.fillna(0)
tm.assert_sp_frame_equal(result, expected.to_sparse(fill_value=0),
exact_indices=False)
tm.assert_frame_equal(result.to_dense(), expected)
result = df.copy()
result = df['A']
result.fillna(0, inplace=True)
expected = dense['A'].fillna(0)
# this changes internal SparseArray repr
# tm.assert_sp_series_equal(result, expected.to_sparse(fill_value=0))
tm.assert_series_equal(result.to_dense(), expected)
def test_fillna_fill_value(self):
df = pd.DataFrame({'A': [1, 0, 0], 'B': [np.nan, np.nan, 4]})
sparse = pd.SparseDataFrame(df)
tm.assert_frame_equal(sparse.fillna(-1).to_dense(),
df.fillna(-1), check_dtype=False)
sparse = pd.SparseDataFrame(df, default_fill_value=0)
tm.assert_frame_equal(sparse.fillna(-1).to_dense(),
df.fillna(-1), check_dtype=False)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
with tm.assert_produces_warning(PerformanceWarning):
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
with tm.assert_produces_warning(PerformanceWarning):
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
with tm.assert_produces_warning(PerformanceWarning):
result = result.fillna(method='pad', limit=5)
with tm.assert_produces_warning(PerformanceWarning):
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
with tm.assert_produces_warning(PerformanceWarning):
result = result.fillna(method='backfill', limit=5)
with tm.assert_produces_warning(PerformanceWarning):
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_rename(self, float_frame):
result = float_frame.rename(index=str)
expected = SparseDataFrame(float_frame.values,
index=float_frame.index.strftime(
"%Y-%m-%d %H:%M:%S"),
columns=list('ABCD'))
tm.assert_sp_frame_equal(result, expected)
result = float_frame.rename(columns=lambda x: '%s%d' % (x, 1))
data = {'A1': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
'B1': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
'C1': np.arange(10, dtype=np.float64),
'D1': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}
expected = SparseDataFrame(data, index=float_frame.index)
tm.assert_sp_frame_equal(result, expected)
def test_corr(self, float_frame):
res = float_frame.corr()
# XXX: this stays sparse
tm.assert_frame_equal(res, float_frame.to_dense().corr().to_sparse())
def test_describe(self, float_frame):
float_frame['foo'] = np.nan
float_frame.get_dtype_counts()
str(float_frame)
desc = float_frame.describe() # noqa
def test_join(self, float_frame):
left = float_frame.loc[:, ['A', 'B']]
right = float_frame.loc[:, ['C', 'D']]
joined = left.join(right)
tm.assert_sp_frame_equal(joined, float_frame, exact_indices=False)
right = float_frame.loc[:, ['B', 'D']]
pytest.raises(Exception, left.join, right)
with pytest.raises(ValueError, match='Other Series must have a name'):
float_frame.join(Series(
np.random.randn(len(float_frame)), index=float_frame.index))
def test_reindex(self, float_frame, float_frame_int_kind,
float_frame_fill0, float_frame_fill2):
def _check_frame(frame):
index = frame.index
sidx = index[::2]
sidx2 = index[:5] # noqa
sparse_result = frame.reindex(sidx)
dense_result = frame.to_dense().reindex(sidx)
tm.assert_frame_equal(sparse_result.to_dense(), dense_result)
tm.assert_frame_equal(frame.reindex(list(sidx)).to_dense(),
dense_result)
sparse_result2 = sparse_result.reindex(index)
dense_result2 = dense_result.reindex(index)
tm.assert_frame_equal(sparse_result2.to_dense(), dense_result2)
# propagate CORRECT fill value
tm.assert_almost_equal(sparse_result.default_fill_value,
frame.default_fill_value)
tm.assert_almost_equal(sparse_result['A'].fill_value,
frame['A'].fill_value)
# length zero
length_zero = frame.reindex([])
assert len(length_zero) == 0
assert len(length_zero.columns) == len(frame.columns)
assert len(length_zero['A']) == 0
# frame being reindexed has length zero
length_n = length_zero.reindex(index)
assert len(length_n) == len(frame)
assert len(length_n.columns) == len(frame.columns)
assert len(length_n['A']) == len(frame)
# reindex columns
reindexed = frame.reindex(columns=['A', 'B', 'Z'])
assert len(reindexed.columns) == 3
tm.assert_almost_equal(reindexed['Z'].fill_value,
frame.default_fill_value)
assert np.isnan(reindexed['Z'].sp_values).all()
_check_frame(float_frame)
_check_frame(float_frame_int_kind)
_check_frame(float_frame_fill0)
_check_frame(float_frame_fill2)
# with copy=False
reindexed = float_frame.reindex(float_frame.index, copy=False)
reindexed['F'] = reindexed['A']
assert 'F' in float_frame
reindexed = float_frame.reindex(float_frame.index)
reindexed['G'] = reindexed['A']
assert 'G' not in float_frame
def test_reindex_fill_value(self, float_frame_fill0,
float_frame_fill0_dense):
rng = bdate_range('20110110', periods=20)
result = float_frame_fill0.reindex(rng, fill_value=0)
exp = float_frame_fill0_dense.reindex(rng, fill_value=0)
exp = exp.to_sparse(float_frame_fill0.default_fill_value)
tm.assert_sp_frame_equal(result, exp)
def test_reindex_method(self):
sparse = SparseDataFrame(data=[[11., 12., 14.],
[21., 22., 24.],
[41., 42., 44.]],
index=[1, 2, 4],
columns=[1, 2, 4],
dtype=float)
# Over indices
# default method
result = sparse.reindex(index=range(6))
expected = SparseDataFrame(data=[[nan, nan, nan],
[11., 12., 14.],
[21., 22., 24.],
[nan, nan, nan],
[41., 42., 44.],
[nan, nan, nan]],
index=range(6),
columns=[1, 2, 4],
dtype=float)
tm.assert_sp_frame_equal(result, expected)
# method='bfill'
result = sparse.reindex(index=range(6), method='bfill')
expected = SparseDataFrame(data=[[11., 12., 14.],
[11., 12., 14.],
[21., 22., 24.],
[41., 42., 44.],
[41., 42., 44.],
[nan, nan, nan]],
index=range(6),
columns=[1, 2, 4],
dtype=float)
tm.assert_sp_frame_equal(result, expected)
# method='ffill'
result = sparse.reindex(index=range(6), method='ffill')
expected = SparseDataFrame(data=[[nan, nan, nan],
[11., 12., 14.],
[21., 22., 24.],
[21., 22., 24.],
[41., 42., 44.],
[41., 42., 44.]],
index=range(6),
columns=[1, 2, 4],
dtype=float)
tm.assert_sp_frame_equal(result, expected)
# Over columns
# default method
result = sparse.reindex(columns=range(6))
expected = SparseDataFrame(data=[[nan, 11., 12., nan, 14., nan],
[nan, 21., 22., nan, 24., nan],
[nan, 41., 42., nan, 44., nan]],
index=[1, 2, 4],
columns=range(6),
dtype=float)
tm.assert_sp_frame_equal(result, expected)
# method='bfill'
with pytest.raises(NotImplementedError):
sparse.reindex(columns=range(6), method='bfill')
# method='ffill'
with pytest.raises(NotImplementedError):
sparse.reindex(columns=range(6), method='ffill')
def test_take(self, float_frame):
result = float_frame.take([1, 0, 2], axis=1)
expected = float_frame.reindex(columns=['B', 'A', 'C'])
tm.assert_sp_frame_equal(result, expected)
def test_to_dense(self, float_frame, float_frame_int_kind,
float_frame_dense,
float_frame_fill0, float_frame_fill0_dense,
float_frame_fill2, float_frame_fill2_dense):
def _check(frame, orig):
dense_dm = frame.to_dense()
# Sparse[float] != float
tm.assert_frame_equal(frame, dense_dm, check_dtype=False)
tm.assert_frame_equal(dense_dm, orig, check_dtype=False)
_check(float_frame, float_frame_dense)
_check(float_frame_int_kind, float_frame_dense)
_check(float_frame_fill0, float_frame_fill0_dense)
_check(float_frame_fill2, float_frame_fill2_dense)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
def test_stack_sparse_frame(self, float_frame, float_frame_int_kind,
float_frame_fill0, float_frame_fill2):
def _check(frame):
dense_frame = frame.to_dense() # noqa
wp = Panel.from_dict({'foo': frame})
from_dense_lp = wp.to_frame()
from_sparse_lp = spf.stack_sparse_frame(frame)
tm.assert_numpy_array_equal(from_dense_lp.values,
from_sparse_lp.values)
_check(float_frame)
_check(float_frame_int_kind)
# for now
pytest.raises(Exception, _check, float_frame_fill0)
pytest.raises(Exception, _check, float_frame_fill2)
def test_transpose(self, float_frame, float_frame_int_kind,
float_frame_dense,
float_frame_fill0, float_frame_fill0_dense,
float_frame_fill2, float_frame_fill2_dense):
def _check(frame, orig):
transposed = frame.T
untransposed = transposed.T
tm.assert_sp_frame_equal(frame, untransposed)
tm.assert_frame_equal(frame.T.to_dense(), orig.T)
tm.assert_frame_equal(frame.T.T.to_dense(), orig.T.T)
tm.assert_sp_frame_equal(frame, frame.T.T, exact_indices=False)
_check(float_frame, float_frame_dense)
_check(float_frame_int_kind, float_frame_dense)
_check(float_frame_fill0, float_frame_fill0_dense)
_check(float_frame_fill2, float_frame_fill2_dense)
def test_shift(self, float_frame, float_frame_int_kind, float_frame_dense,
float_frame_fill0, float_frame_fill0_dense,
float_frame_fill2, float_frame_fill2_dense):
def _check(frame, orig):
shifted = frame.shift(0)
exp = orig.shift(0)
tm.assert_frame_equal(shifted.to_dense(), exp)
shifted = frame.shift(1)
exp = orig.shift(1)
tm.assert_frame_equal(shifted.to_dense(), exp)
shifted = frame.shift(-2)
exp = orig.shift(-2)
tm.assert_frame_equal(shifted.to_dense(), exp)
shifted = frame.shift(2, freq='B')
exp = orig.shift(2, freq='B')
exp = exp.to_sparse(frame.default_fill_value,
kind=frame.default_kind)
tm.assert_frame_equal(shifted, exp)
shifted = frame.shift(2, freq=BDay())
exp = orig.shift(2, freq=BDay())
exp = exp.to_sparse(frame.default_fill_value,
kind=frame.default_kind)
tm.assert_frame_equal(shifted, exp)
_check(float_frame, float_frame_dense)
_check(float_frame_int_kind, float_frame_dense)
_check(float_frame_fill0, float_frame_fill0_dense)
_check(float_frame_fill2, float_frame_fill2_dense)
def test_count(self, float_frame):
dense_result = float_frame.to_dense().count()
result = float_frame.count()
tm.assert_series_equal(result.to_dense(), dense_result)
result = float_frame.count(axis=None)
tm.assert_series_equal(result.to_dense(), dense_result)
result = float_frame.count(axis=0)
tm.assert_series_equal(result.to_dense(), dense_result)
result = float_frame.count(axis=1)
dense_result = float_frame.to_dense().count(axis=1)
        # on win32, don't check the dtype
tm.assert_series_equal(result, dense_result, check_dtype=False)
def test_numpy_transpose(self):
sdf = SparseDataFrame([1, 2, 3], index=[1, 2, 3], columns=['a'])
result = np.transpose(np.transpose(sdf))
tm.assert_sp_frame_equal(result, sdf)
msg = "the 'axes' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.transpose(sdf, axes=1)
def test_combine_first(self, float_frame):
df = float_frame
result = df[::2].combine_first(df)
expected = df[::2].to_dense().combine_first(df.to_dense())
expected = expected.to_sparse(fill_value=df.default_fill_value)
tm.assert_sp_frame_equal(result, expected)
@pytest.mark.xfail(reason="No longer supported.")
def test_combine_first_with_dense(self):
# We could support this if we allow
# pd.core.dtypes.cast.find_common_type to special case SparseDtype
# but I don't think that's worth it.
df = self.frame
result = df[::2].combine_first(df.to_dense())
expected = df[::2].to_dense().combine_first(df.to_dense())
expected = expected.to_sparse(fill_value=df.default_fill_value)
tm.assert_sp_frame_equal(result, expected)
def test_combine_add(self, float_frame):
df = float_frame.to_dense()
df2 = df.copy()
df2['C'][:3] = np.nan
df['A'][:3] = 5.7
result = df.to_sparse().add(df2.to_sparse(), fill_value=0)
expected = df.add(df2, fill_value=0).to_sparse()
tm.assert_sp_frame_equal(result, expected)
def test_isin(self):
sparse_df = DataFrame({'flag': [1., 0., 1.]}).to_sparse(fill_value=0.)
xp = sparse_df[sparse_df.flag == 1.]
rs = sparse_df[sparse_df.flag.isin([1.])]
tm.assert_frame_equal(xp, rs)
def test_sparse_pow_issue(self):
# 2220
df = SparseDataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})
# note : no error without nan
df = SparseDataFrame({'A': [nan, 0, 1]})
# note that 2 ** df works fine, also df ** 1
result = 1 ** df
r1 = result.take([0], 1)['A']
r2 = result['A']
assert len(r2.sp_values) == len(r1.sp_values)
def test_as_blocks(self):
df = SparseDataFrame({'A': [1.1, 3.3], 'B': [nan, -3.9]},
dtype='float64')
# deprecated 0.21.0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
df_blocks = df.blocks
assert list(df_blocks.keys()) == ['Sparse[float64, nan]']
tm.assert_frame_equal(df_blocks['Sparse[float64, nan]'], df)
@pytest.mark.xfail(reason='nan column names in _init_dict problematic '
'(GH#16894)')
def test_nan_columnname(self):
# GH 8822
nan_colname = DataFrame(Series(1.0, index=[0]), columns=[nan])
nan_colname_sparse = nan_colname.to_sparse()
assert np.isnan(nan_colname_sparse.columns[0])
def test_isna(self):
# GH 8276
df = pd.SparseDataFrame({'A': [np.nan, np.nan, 1, 2, np.nan],
'B': [0, np.nan, np.nan, 2, np.nan]})
res = df.isna()
exp = pd.SparseDataFrame({'A': [True, True, False, False, True],
'B': [False, True, True, False, True]},
default_fill_value=True)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp)
# if fill_value is not nan, True can be included in sp_values
df = pd.SparseDataFrame({'A': [0, 0, 1, 2, np.nan],
'B': [0, np.nan, 0, 2, np.nan]},
default_fill_value=0.)
res = df.isna()
assert isinstance(res, pd.SparseDataFrame)
exp = pd.DataFrame({'A': [False, False, False, False, True],
'B': [False, True, False, False, True]})
tm.assert_frame_equal(res.to_dense(), exp)
def test_notna(self):
# GH 8276
df = pd.SparseDataFrame({'A': [np.nan, np.nan, 1, 2, np.nan],
'B': [0, np.nan, np.nan, 2, np.nan]})
res = df.notna()
exp = pd.SparseDataFrame({'A': [False, False, True, True, False],
'B': [True, False, False, True, False]},
default_fill_value=False)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp)
# if fill_value is not nan, True can be included in sp_values
df = pd.SparseDataFrame({'A': [0, 0, 1, 2, np.nan],
'B': [0, np.nan, 0, 2, np.nan]},
default_fill_value=0.)
res = df.notna()
assert isinstance(res, pd.SparseDataFrame)
exp = pd.DataFrame({'A': [True, True, True, True, False],
'B': [True, False, True, True, False]})
tm.assert_frame_equal(res.to_dense(), exp)
class TestSparseDataFrameArithmetic(object):
def test_numeric_op_scalar(self):
df = pd.DataFrame({'A': [nan, nan, 0, 1, ],
'B': [0, 1, 2, nan],
'C': [1., 2., 3., 4.],
'D': [nan, nan, nan, nan]})
sparse = df.to_sparse()
tm.assert_sp_frame_equal(sparse + 1, (df + 1).to_sparse())
def test_comparison_op_scalar(self):
# GH 13001
df = pd.DataFrame({'A': [nan, nan, 0, 1, ],
'B': [0, 1, 2, nan],
'C': [1., 2., 3., 4.],
'D': [nan, nan, nan, nan]})
sparse = df.to_sparse()
# comparison changes internal repr, compare with dense
res = sparse > 1
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), df > 1)
res = sparse != 0
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), df != 0)
class TestSparseDataFrameAnalytics(object):
def test_cumsum(self, float_frame):
expected = SparseDataFrame(float_frame.to_dense().cumsum())
result = float_frame.cumsum()
tm.assert_sp_frame_equal(result, expected)
result = float_frame.cumsum(axis=None)
tm.assert_sp_frame_equal(result, expected)
result = float_frame.cumsum(axis=0)
tm.assert_sp_frame_equal(result, expected)
def test_numpy_cumsum(self, float_frame):
result = np.cumsum(float_frame)
expected = SparseDataFrame(float_frame.to_dense().cumsum())
tm.assert_sp_frame_equal(result, expected)
msg = "the 'dtype' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.cumsum(float_frame, dtype=np.int64)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.cumsum(float_frame, out=result)
def test_numpy_func_call(self, float_frame):
# no exception should be raised even though
        # numpy passes in 'axis=None' or 'axis=-1'
funcs = ['sum', 'cumsum', 'var',
'mean', 'prod', 'cumprod',
'std', 'min', 'max']
for func in funcs:
getattr(np, func)(float_frame)
@pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH 17386)')
def test_quantile(self):
# GH 17386
data = [[1, 1], [2, 10], [3, 100], [nan, nan]]
q = 0.1
sparse_df = SparseDataFrame(data)
result = sparse_df.quantile(q)
dense_df = DataFrame(data)
dense_expected = dense_df.quantile(q)
sparse_expected = SparseSeries(dense_expected)
tm.assert_series_equal(result, dense_expected)
tm.assert_sp_series_equal(result, sparse_expected)
@pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH 17386)')
def test_quantile_multi(self):
# GH 17386
data = [[1, 1], [2, 10], [3, 100], [nan, nan]]
q = [0.1, 0.5]
sparse_df = SparseDataFrame(data)
result = sparse_df.quantile(q)
dense_df = DataFrame(data)
dense_expected = dense_df.quantile(q)
sparse_expected = SparseDataFrame(dense_expected)
tm.assert_frame_equal(result, dense_expected)
tm.assert_sp_frame_equal(result, sparse_expected)
def test_assign_with_sparse_frame(self):
# GH 19163
df = pd.DataFrame({"a": [1, 2, 3]})
res = df.to_sparse(fill_value=False).assign(newcol=False)
exp = df.assign(newcol=False).to_sparse(fill_value=False)
tm.assert_sp_frame_equal(res, exp)
for column in res.columns:
assert type(res[column]) is SparseSeries
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("how", ["all", "any"])
def test_dropna(self, inplace, how):
# Tests regression #21172.
expected = pd.SparseDataFrame({"F2": [0, 1]})
input_df = pd.SparseDataFrame(
{"F1": [float('nan'), float('nan')], "F2": [0, 1]}
)
result_df = input_df.dropna(axis=1, inplace=inplace, how=how)
if inplace:
result_df = input_df
tm.assert_sp_frame_equal(expected, result_df)
| bsd-3-clause |
nuclear-wizard/moose | modules/porous_flow/examples/thm_example/compare_2D.py | 12 | 9121 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
# This script generates pictures that demonstrate the agreement between the analytic LaForce solutions and MOOSE
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def tara_read(fn, scaling = 1):
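    """Read a two-column CSV of the analytic (LaForce) solution from the gold/
    directory and return it as (x_values, y_values * scaling) lists."""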
try:
f = open(os.path.join("gold", fn))
data = f.readlines()
        data = [list(map(float, d.strip().split(","))) for d in data]  # list() so rows stay indexable under Python 3
data = ([d[0] for d in data], [d[1] * scaling for d in data])
f.close()
except:
sys.stderr.write("Cannot read " + fn + ", or it contains erroneous data\n")
sys.exit(1)
return data
def moose(fn):
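    """Read a MOOSE CSV result from the gold/ directory, subsample its rows
    logarithmically in index, and return the radius column together with the
    six solution columns used in the comparison plots."""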
try:
f = open(os.path.join("gold", fn))
data = f.readlines()[1:-1]
        data = [list(map(float, d.strip().split(","))) for d in data]  # list() so rows stay indexable under Python 3
log10_max_val = np.log10(len(data) - 1)
num_pts_displayed = 70
subsample = [np.power(10, log10_max_val * i / float(num_pts_displayed - 1)) for i in range(num_pts_displayed)] # 1 to max_val in logarithmic progression
subsample = sorted(list(set([0] + [int(np.round(s)) for s in subsample]))) # 0 to len(data)-1 in log progression
data = [data[i] for i in subsample]
data = ([d[0] for d in data], [d[4] for d in data], [d[5] for d in data], [d[6] for d in data], [d[7] for d in data], [d[8] for d in data], [d[9] for d in data])
f.close()
except:
sys.stderr.write("Cannot read " + fn + ", or it contains erroneous data\n")
sys.exit(1)
return data
result_file_names = ["pp_one_hour.csv", "pp_one_day.csv", "pp_one_month.csv", "pp_five_years.csv"]
tara_pp = [tara_read(fn, scaling = 1E-6) for fn in result_file_names]
result_file_names = ["tt_one_hour.csv", "tt_one_day.csv", "tt_one_month.csv", "tt_five_years.csv"]
tara_tt = [tara_read(fn, scaling = 1) for fn in result_file_names]
result_file_names = ["sg_one_hour.csv", "sg_one_day.csv", "sg_one_month.csv", "sg_five_years.csv"]
tara_sg = [tara_read(fn, scaling = 1) for fn in result_file_names]
result_file_names = ["u_one_hour.csv", "u_one_day.csv", "u_one_month.csv", "u_five_years.csv"]
tara_u = [tara_read(fn, scaling = 1) for fn in result_file_names]
result_file_names = ["seff_rr_one_hour.csv", "seff_rr_one_day.csv", "seff_rr_one_month.csv", "seff_rr_five_years.csv"]
tara_seff_rr = [tara_read(fn, scaling = 1) for fn in result_file_names]
result_file_names = ["seff_tt_one_hour.csv", "seff_tt_one_day.csv", "seff_tt_one_month.csv", "seff_tt_five_years.csv"]
tara_seff_tt = [tara_read(fn, scaling = 1) for fn in result_file_names]
moose_timesteps = ["0062", "0098", "0135", "0180"]
moose_timesteps = ["0078", "0198", "0412", "0704"]
moosePTSUSS = [moose("2D_csv_ptsuss_" + ts + ".csv") for ts in moose_timesteps]
plt.figure()
plt.semilogx(moosePTSUSS[0][0], [(p - 18.3E6)/1E6 for p in moosePTSUSS[0][1]], 'b*', markersize=4, label = 'MOOSE (1 hour)')
plt.semilogx(tara_pp[0][0], tara_pp[0][1], 'b-', label = 'LaForce (1 hour)')
plt.semilogx(moosePTSUSS[1][0], [(p - 18.3E6)/1E6 for p in moosePTSUSS[1][1]], 'rs', markersize=4, label = 'MOOSE (1 day)')
plt.semilogx(tara_pp[1][0], tara_pp[1][1], 'r-', label = 'LaForce (1 day)')
plt.semilogx(moosePTSUSS[2][0], [(p - 18.3E6)/1E6 for p in moosePTSUSS[2][1]], 'g^', markersize=4, label = 'MOOSE (1 month)')
plt.semilogx(tara_pp[2][0], tara_pp[2][1], 'g-', label = 'LaForce (1 month)')
plt.semilogx(moosePTSUSS[3][0], [(p - 18.3E6)/1E6 for p in moosePTSUSS[3][1]], 'ko', markersize=4, label = 'MOOSE (5 years)')
plt.semilogx(tara_pp[3][0], tara_pp[3][1], 'k-', label = 'LaForce (5 years)')
plt.legend(loc = 'best', prop = {'size': 10})
plt.xlim([0.1, 5000])
plt.xlabel("r (m)")
plt.ylabel("Porepressure increase (MPa)")
plt.title("Porepressure")
plt.savefig("../../doc/content/media/porous_flow/2D_thm_compare_porepressure_fig.png")
plt.figure()
plt.semilogx(moosePTSUSS[0][0], moosePTSUSS[0][2], 'b*', markersize=4, label = 'MOOSE (1 hour)')
plt.semilogx(tara_tt[0][0], tara_tt[0][1], 'b-', label = 'LaForce (1 hour)')
plt.semilogx(moosePTSUSS[1][0], moosePTSUSS[1][2], 'rs', markersize=4, label = 'MOOSE (1 day)')
plt.semilogx(tara_tt[1][0], tara_tt[1][1], 'r-', label = 'LaForce (1 day)')
plt.semilogx(moosePTSUSS[2][0], moosePTSUSS[2][2], 'g^', markersize=4, label = 'MOOSE (1 month)')
plt.semilogx(tara_tt[2][0], tara_tt[2][1], 'g-', label = 'LaForce (1 month)')
plt.semilogx(moosePTSUSS[3][0], moosePTSUSS[3][2], 'ko', markersize=4, label = 'MOOSE (5 years)')
plt.semilogx(tara_tt[3][0], tara_tt[3][1], 'k-', label = 'LaForce (5 years)')
plt.legend(loc = 'best', prop = {'size': 10})
plt.xlim([0.1, 5000])
plt.xlabel("r (m)")
plt.ylabel("Temperature (K)")
plt.title("Temperature")
plt.savefig("../../doc/content/media/porous_flow/2D_thm_compare_temperature_fig.png")
plt.figure()
plt.semilogx(moosePTSUSS[0][0], [u * 1000 for u in moosePTSUSS[0][4]], 'b*', markersize=4, label = 'MOOSE (1 hour)')
plt.semilogx(tara_u[0][0], tara_u[0][1], 'b-', label = 'LaForce (1 hour)')
plt.semilogx(moosePTSUSS[1][0], [u * 1000 for u in moosePTSUSS[1][4]], 'rs', markersize=4, label = 'MOOSE (1 day)')
plt.semilogx(tara_u[1][0], tara_u[1][1], 'r-', label = 'LaForce (1 day)')
plt.semilogx(moosePTSUSS[2][0], [u * 1000 for u in moosePTSUSS[2][4]], 'g^', markersize=4, label = 'MOOSE (1 month)')
plt.semilogx(tara_u[2][0], tara_u[2][1], 'g-', label = 'LaForce (1 month)')
plt.semilogx(moosePTSUSS[3][0], [u * 1000 for u in moosePTSUSS[3][4]], 'ko', markersize=4, label = 'MOOSE (5 years)')
plt.semilogx(tara_u[3][0], tara_u[3][1], 'k-', label = 'LaForce (5 years)')
plt.legend(loc = 'best', prop = {'size': 10})
plt.xlim([0.1, 5000])
plt.xlabel("r (m)")
plt.ylabel("Displacement (mm)")
plt.title("Radial displacement")
plt.savefig("../../doc/content/media/porous_flow/2D_thm_compare_displacement_fig.png")
plt.figure()
plt.semilogx(moosePTSUSS[0][0], moosePTSUSS[0][3], 'b*', markersize=4, label = 'MOOSE (1 hour)')
plt.semilogx(tara_sg[0][0], tara_sg[0][1], 'b-', label = 'LaForce (1 hour)')
plt.semilogx(moosePTSUSS[1][0], moosePTSUSS[1][3], 'rs', markersize=4, label = 'MOOSE (1 day)')
plt.semilogx(tara_sg[1][0], tara_sg[1][1], 'r-', label = 'LaForce (1 day)')
plt.semilogx(moosePTSUSS[2][0], moosePTSUSS[2][3], 'g^', markersize=4, label = 'MOOSE (1 month)')
plt.semilogx(tara_sg[2][0], tara_sg[2][1], 'g-', label = 'LaForce (1 month)')
plt.semilogx(moosePTSUSS[3][0], moosePTSUSS[3][3], 'ko', markersize=4, label = 'MOOSE (5 years)')
plt.semilogx(tara_sg[3][0], tara_sg[3][1], 'k-', label = 'LaForce (5 years)')
plt.legend(loc = 'best', prop = {'size': 10})
plt.xlim([0.1, 5000])
plt.xlabel("r (m)")
plt.ylabel("Saturation")
plt.title("CO2 saturation")
plt.savefig("../../doc/content/media/porous_flow/2D_thm_compare_sg_fig.png")
plt.figure()
plt.semilogx(moosePTSUSS[0][0], [s/1E6 for s in moosePTSUSS[0][5]], 'b*', markersize=4, label = 'MOOSE (1 hour)')
plt.semilogx(tara_seff_rr[0][0], tara_seff_rr[0][1], 'b-', label = 'LaForce (1 hour)')
plt.semilogx(moosePTSUSS[1][0], [s/1E6 for s in moosePTSUSS[1][5]], 'rs', markersize=4, label = 'MOOSE (1 day)')
plt.semilogx(tara_seff_rr[1][0], tara_seff_rr[1][1], 'r-', label = 'LaForce (1 day)')
plt.semilogx(moosePTSUSS[2][0], [s/1E6 for s in moosePTSUSS[2][5]], 'g^', markersize=4, label = 'MOOSE (1 month)')
plt.semilogx(tara_seff_rr[2][0], tara_seff_rr[2][1], 'g-', label = 'LaForce (1 month)')
plt.semilogx(moosePTSUSS[3][0], [s/1E6 for s in moosePTSUSS[3][5]], 'ko', markersize=4, label = 'MOOSE (5 years)')
plt.semilogx(tara_seff_rr[3][0], tara_seff_rr[3][1], 'k-', label = 'LaForce (5 years)')
plt.legend(loc = 'best', prop = {'size': 10})
plt.xlim([0.1, 5000])
plt.xlabel("r (m)")
plt.ylabel("Stress (MPa)")
plt.title("Effective radial stress")
plt.savefig("../../doc/content/media/porous_flow/2D_thm_compare_seff_rr_fig.png")
plt.figure()
plt.semilogx(moosePTSUSS[0][0], [s/1E6 for s in moosePTSUSS[0][6]], 'b*', markersize=4, label = 'MOOSE (1 hour)')
plt.semilogx(tara_seff_tt[0][0], tara_seff_tt[0][1], 'b-', label = 'LaForce (1 hour)')
plt.semilogx(moosePTSUSS[1][0], [s/1E6 for s in moosePTSUSS[1][6]], 'rs', markersize=4, label = 'MOOSE (1 day)')
plt.semilogx(tara_seff_tt[1][0], tara_seff_tt[1][1], 'r-', label = 'LaForce (1 day)')
plt.semilogx(moosePTSUSS[2][0], [s/1E6 for s in moosePTSUSS[2][6]], 'g^', markersize=4, label = 'MOOSE (1 month)')
plt.semilogx(tara_seff_tt[2][0], tara_seff_tt[2][1], 'g-', label = 'LaForce (1 month)')
plt.semilogx(moosePTSUSS[3][0], [s/1E6 for s in moosePTSUSS[3][6]], 'ko', markersize=4, label = 'MOOSE (5 years)')
plt.semilogx(tara_seff_tt[3][0], tara_seff_tt[3][1], 'k-', label = 'LaForce (5 years)')
plt.legend(loc = 'best', prop = {'size': 10})
plt.xlim([0.1, 5000])
plt.xlabel("r (m)")
plt.ylabel("Stress (MPa)")
plt.title("Effective hoop stress")
plt.savefig("../../doc/content/media/porous_flow/2D_thm_compare_seff_tt_fig.png")
sys.exit(0)
| lgpl-2.1 |
samirma/deep-learning | weight-initialization/helper.py | 153 | 3649 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
"""
Display histogram of a TF distribution
"""
with tf.Session() as sess:
values = sess.run(distribution_tensor)
plt.title(title)
    plt.hist(values, np.linspace(*hist_range, num=len(values)//2))
plt.show()
def _get_loss_acc(dataset, weights):
"""
Get losses and validation accuracy of example neural network
"""
batch_size = 128
epochs = 2
learning_rate = 0.001
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
learn_rate = tf.placeholder(tf.float32)
biases = [
tf.Variable(tf.zeros([256])),
tf.Variable(tf.zeros([128])),
tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
]
# Layers
layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
logits = tf.matmul(layer_2, weights[2]) + biases[2]
# Training loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# Optimizer
optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# Accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Measurements use for graphing loss
loss_batch = []
with tf.Session() as session:
session.run(tf.global_variables_initializer())
batch_count = int((dataset.train.num_examples / batch_size))
# The training cycle
for epoch_i in range(epochs):
for batch_i in range(batch_count):
batch_features, batch_labels = dataset.train.next_batch(batch_size)
# Run optimizer and get loss
session.run(
optimizer,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
l = session.run(
loss,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
loss_batch.append(l)
valid_acc = session.run(
accuracy,
feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})
# Hack to Reset batches
dataset.train._index_in_epoch = 0
dataset.train._epochs_completed = 0
return loss_batch, valid_acc
def compare_init_weights(
dataset,
title,
weight_init_list,
plot_n_batches=100):
"""
Plot loss and print stats of weights using an example neural network
"""
colors = ['r', 'b', 'g', 'c', 'y', 'k']
label_accs = []
label_loss = []
    assert len(weight_init_list) <= len(colors), 'Too many initial weights to plot'
for i, (weights, label) in enumerate(weight_init_list):
loss, val_acc = _get_loss_acc(dataset, weights)
plt.plot(loss[:plot_n_batches], colors[i], label=label)
label_accs.append((label, val_acc))
label_loss.append((label, loss[-1]))
plt.title(title)
plt.xlabel('Batches')
plt.ylabel('Loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
print('After 858 Batches (2 Epochs):')
print('Validation Accuracy')
for label, val_acc in label_accs:
print(' {:7.3f}% -- {}'.format(val_acc*100, label))
print('Loss')
for label, loss in label_loss:
print(' {:7.3f} -- {}'.format(loss, label))
| mit |
LucasGandel/TubeTK | Base/Python/pyfsa/mapcl.py | 8 | 3869 | """mapcl.py
Demonstrate how to evaluate a maximum a-posteriori
graph classifier using N-fold cross-validation.
"""
__license__ = "Apache License, Version 2.0 (see TubeTK)"
__author__ = "Roland Kwitt, Kitware Inc., 2013"
__email__ = "E-Mail: [email protected]"
__status__ = "Development"
# Graph handling
import networkx as nx
# Machine learning
from sklearn.metrics import accuracy_score
from sklearn.cross_validation import KFold
from sklearn.cross_validation import ShuffleSplit
from sklearn.linear_model import LogisticRegression
from sklearn.grid_search import GridSearchCV
from sklearn import preprocessing
from sklearn import svm
# Misc.
from optparse import OptionParser
import logging
import numpy as np
import scipy.sparse
import time
import sys
import os
# Fine-structure analysis
import core.fsa as fsa
import core.utils as utils
def main(argv=None):
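    """Parse CLI options, run fine-structure analysis on the input graphs and
    evaluate a MAP GMM classifier over shuffle-split cross-validation folds."""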
if argv is None:
argv = sys.argv
# Setup vanilla CLI parsing and add custom arg(s).
parser = utils.setup_cli_parsing()
parser.add_option("",
"--mixComp",
help="number of GMM components.",
default=3,
type="int")
(options, args) = parser.parse_args()
# Setup logging
utils.setup_logging(options)
logger = logging.getLogger()
# Read graph file list and label file list
graph_file_list = utils.read_graph_file_list(options)
label_file_list = utils.read_label_file_list(options, graph_file_list)
# Read class info and grouping info
class_info = utils.read_class_info(options)
group_info = utils.read_group_info(options)
assert (group_info.shape[0] ==
len(class_info) ==
len(graph_file_list) ==
len(label_file_list))
# Zip lists together
data = zip(graph_file_list,
label_file_list,
class_info)
# Run fine-structure analysis
fsa_res = fsa.run_fsa(data,
options.radii,
options.recompute,
options.writeAs,
options.skip,
options.omitDegenerate)
data_mat = fsa_res['data_mat']
data_idx = fsa_res['data_idx']
# Create cross-validation folds (20% testing)
n_graphs = len(class_info)
cv = ShuffleSplit(n_graphs,
n_iter=options.cvRuns,
test_size=0.2,
random_state=0)
# Our unique class labels
label_set = np.unique(class_info)
if options.normalize:
logger.info("Running feature normalization ...")
scaler = preprocessing.StandardScaler(copy=False)
scaler.fit_transform(fsa_res['data_mat'])
scores = []
for cv_id, (trn, tst) in enumerate(cv):
models = []
for l in label_set:
l_idx = np.where(class_info == l)[0]
l_idx = np.asarray(l_idx).ravel()
l_trn = np.intersect1d(l_idx, trn)
pos = []
for i in l_trn:
tmp = np.where(fsa_res['data_idx']==i)[0]
pos.extend(list(tmp))
np_pos = np.asarray(pos)
gmm_model = fsa.estimate_gm(data_mat[np_pos,:], options.mixComp)
models.append(gmm_model)
predict = []
for i in tst:
pos = np.where(data_idx==i)[0]
map_idx = fsa.pp_gmm(data_mat[pos,:], models, argmax=True)
predict.append(label_set[map_idx])
# Score the MAP classifier
truth = [class_info[i] for i in tst]
score = accuracy_score(truth, predict)
print "yhat :", predict
print "gold :", truth
logger.info("Score (%.2d): %.2f" % (cv_id, 100*score))
scores.append(score)
utils.show_summary(scores)
if __name__ == "__main__":
main()
| apache-2.0 |
hsiaoyi0504/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 143 | 22295 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: check that it does not raise casting
    # errors with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([0, 1, 2])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
    # Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
maxim5/time-series-machine-learning | util/data_util.py | 1 | 2901 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'maxim'
import datetime
import numpy as np
import pandas as pd
from .data_set import DataSet
from .logging import vlog
pd.set_option('display.expand_frame_repr', False)
def read_df(file_name):
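  """Read a raw price CSV and convert its epoch-second date column to datetimes."""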
df = pd.read_csv(file_name)
df.date = pd.to_datetime(df.date * 1000, unit='ms')
return df
def to_changes(raw):
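  """Convert raw OHLC/volume data into a frame of fractional (pct_change) values,
  with the time of day expressed as a fraction of a day."""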
if raw.date.dtype == np.int64:
raw.date = pd.to_datetime(raw.date * 1000, unit='ms')
return pd.DataFrame({
'date': raw.date,
'time': raw.date.astype(datetime.datetime).apply(lambda val: seconds(val) / (60*60*24)),
'high': raw.high.pct_change(),
'low': raw.low.pct_change(),
'open': raw.open.pct_change(),
'close': raw.close.pct_change(),
'vol': raw.volume.replace({0: 1e-5}).pct_change(),
'avg': raw.weightedAverage.pct_change(),
}, columns=['date', 'time', 'high', 'low', 'open', 'close', 'vol', 'avg'])
def seconds(datetime_):
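  """Return the number of seconds elapsed since midnight for the given datetime."""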
return (datetime_ - datetime_.replace(hour=0, minute=0, second=0, microsecond=0)).total_seconds()
def to_dataset(df, k, target_column, with_bias):
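  """Build a labelled DataSet: each sample is a flattened window of k consecutive
  rows (optionally prefixed with a bias term of 1) and its label is the
  target_column value of the row that follows the window."""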
df = df[1:].reset_index(drop=True)
df = df.drop(['date'], axis=1)
target = df[target_column]
n, cols = df.shape
windows_num = n - k # effective window size, including the label, is k + 1
x = np.empty([windows_num, k * cols + int(with_bias)])
y = np.empty([windows_num])
for i in range(windows_num):
window = df[i:i+k]
row = window.as_matrix().reshape((-1,))
if with_bias:
row = np.insert(row, 0, 1)
x[i] = row
y[i] = target[i+k]
vlog('Data set: x=%s y=%s' % (x.shape, y.shape))
return DataSet(x, y)
def to_dataset_for_prediction(df, k, with_bias):
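  """Build only the feature matrix for prediction, using the same flattened
  k-row window layout (and optional bias term) as to_dataset."""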
df = df[1:].reset_index(drop=True)
df = df.drop(['date'], axis=1)
n, cols = df.shape
windows_num = n - k + 1
x = np.empty([windows_num, k * cols + int(with_bias)])
for i in range(windows_num):
window = df[i:i+k]
row = window.as_matrix().reshape((-1,))
if with_bias:
row = np.insert(row, 0, 1)
x[i] = row
vlog('Data set for prediction:', x.shape)
return x
def split_dataset(dataset, ratio=None):
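  """Randomly split a DataSet into train and test parts; if no ratio is given,
  choose one heuristically from the dataset size."""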
size = dataset.size
if ratio is None:
ratio = _choose_optimal_train_ratio(size)
mask = np.zeros(size, dtype=np.bool_)
train_size = int(size * ratio)
mask[:train_size] = True
np.random.shuffle(mask)
train_x = dataset.x[mask, :]
train_y = dataset.y[mask]
mask = np.invert(mask)
test_x = dataset.x[mask, :]
test_y = dataset.y[mask]
return DataSet(train_x, train_y), DataSet(test_x, test_y)
def _choose_optimal_train_ratio(size):
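  """Heuristic train fraction: larger datasets keep a larger share for training."""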
if size > 100000: return 0.95
if size > 50000: return 0.9
if size > 20000: return 0.875
if size > 10000: return 0.85
if size > 7500: return 0.825
if size > 5000: return 0.8
if size > 3000: return 0.775
if size > 2000: return 0.75
if size > 1000: return 0.7
return 0.7
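# Illustrative usage sketch (not part of the original module); the file name,
# window size and target column below are hypothetical examples:
#
#   raw = read_df('prices.csv')
#   changes = to_changes(raw)
#   dataset = to_dataset(changes, k=10, target_column='high', with_bias=True)
#   train, test = split_dataset(dataset)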
| apache-2.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/tests/indexes/timedeltas/test_indexing.py | 13 | 4622 | import pytest
from datetime import timedelta
import pandas.util.testing as tm
from pandas import TimedeltaIndex, timedelta_range, compat, Index, Timedelta
class TestTimedeltaIndex(object):
_multiprocess_can_split_ = True
def test_insert(self):
idx = TimedeltaIndex(['4day', '1day', '2day'], name='idx')
result = idx.insert(2, timedelta(days=5))
exp = TimedeltaIndex(['4day', '1day', '5day', '2day'], name='idx')
tm.assert_index_equal(result, exp)
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([Timedelta('4day'), 'inserted', Timedelta('1day'),
Timedelta('2day')], name='idx')
assert not isinstance(result, TimedeltaIndex)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
idx = timedelta_range('1day 00:00:01', periods=3, freq='s', name='idx')
# preserve freq
expected_0 = TimedeltaIndex(['1day', '1day 00:00:01', '1day 00:00:02',
'1day 00:00:03'],
name='idx', freq='s')
expected_3 = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02',
'1day 00:00:03', '1day 00:00:04'],
name='idx', freq='s')
# reset freq to None
expected_1_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:01',
'1day 00:00:02', '1day 00:00:03'],
name='idx', freq=None)
expected_3_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02',
'1day 00:00:03', '1day 00:00:05'],
name='idx', freq=None)
cases = [(0, Timedelta('1day'), expected_0),
(-3, Timedelta('1day'), expected_0),
(3, Timedelta('1day 00:00:04'), expected_3),
(1, Timedelta('1day 00:00:01'), expected_1_nofreq),
(3, Timedelta('1day 00:00:05'), expected_3_nofreq)]
for n, d, expected in cases:
result = idx.insert(n, d)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
def test_delete(self):
idx = timedelta_range(start='1 Days', periods=5, freq='D', name='idx')
        # preserve freq
expected_0 = timedelta_range(start='2 Days', periods=4, freq='D',
name='idx')
expected_4 = timedelta_range(start='1 Days', periods=4, freq='D',
name='idx')
# reset freq to None
expected_1 = TimedeltaIndex(
['1 day', '3 day', '4 day', '5 day'], freq=None, name='idx')
cases = {0: expected_0,
-5: expected_0,
-1: expected_4,
4: expected_4,
1: expected_1}
for n, expected in compat.iteritems(cases):
result = idx.delete(n)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
with pytest.raises((IndexError, ValueError)):
            # either, depending on the numpy version
result = idx.delete(5)
def test_delete_slice(self):
idx = timedelta_range(start='1 days', periods=10, freq='D', name='idx')
        # preserve freq
expected_0_2 = timedelta_range(start='4 days', periods=7, freq='D',
name='idx')
expected_7_9 = timedelta_range(start='1 days', periods=7, freq='D',
name='idx')
# reset freq to None
expected_3_5 = TimedeltaIndex(['1 d', '2 d', '3 d',
'7 d', '8 d', '9 d', '10d'],
freq=None, name='idx')
cases = {(0, 1, 2): expected_0_2,
(7, 8, 9): expected_7_9,
(3, 4, 5): expected_3_5}
for n, expected in compat.iteritems(cases):
result = idx.delete(n)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
result = idx.delete(slice(n[0], n[-1] + 1))
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
| mit |
JeffHeard/terrapyn | geocms/views/rest_data.py | 1 | 13526 | from tempfile import NamedTemporaryFile
import json
from django.contrib.gis.geos import GEOSGeometry
import pandas
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from django.contrib.auth.models import User
from mezzanine.pages.models import Page
from tastypie.models import ApiKey
from terrapyn.geocms.drivers.spatialite import SpatialiteDriver
from terrapyn.geocms.models import DataResource
from terrapyn.geocms import dispatch
from terrapyn.geocms.utils import authorize, get_data_page_for_user, json_or_jsonp
def get_user(request):
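    """Resolve the requesting user from an api_key parameter, from the
    authenticated session, or fall back to the (anonymous) request user."""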
if 'api_key' in request.REQUEST:
api_key = ApiKey.objects.get(key=request.REQUEST['api_key'])
return api_key.user
elif request.user.is_authenticated():
return User.objects.get(pk=request.user.pk)
else:
return request.user
def geojson_transform(request, data):
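    """Wrap feature dicts as GeoJSON Feature/FeatureCollection objects when
    format=geojsonreal is requested; otherwise return the data unchanged."""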
if request.REQUEST.get('format','wkt') == 'geojsonreal':
if isinstance(data, list):
return { 'type' : 'FeatureCollection', 'features' : [{ 'type' : 'Feature', 'geometry' : feature['GEOMETRY'], 'properties' : feature } for feature in data] }
else:
return { 'type' : 'Feature', 'properties' : data, 'geometry' : data['GEOMETRY'] }
else:
return data
def create_dataset(request):
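    """Create a new empty Spatialite-backed dataset under the requested parent
    page (or the user's default data page) and return its slug."""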
user = authorize(request)
title = request.REQUEST.get('title','Untitled dataset')
srid = int(request.REQUEST.get('srid', 4326))
geometry_type=request.REQUEST.get('geometry_type', 'GEOMETRY')
columns_definitions=json.loads(request.REQUEST.get('columns_definitions', "{}"))
columns_definitions=((key, value) for key, value in columns_definitions.items())
if 'parent' in request.REQUEST:
parent = Page.objects.get(slug=request.REQUEST['parent'])
authorize(request, parent, add=True)
else:
parent = get_data_page_for_user(request.user)
ds = SpatialiteDriver.create_dataset(
title=title,
parent=parent,
srid=srid,
geometry_type=geometry_type,
columns_definitions=columns_definitions,
owner=request.user
)
dispatch.api_accessed.send(sender=DataResource, instance=ds, user=user)
dispatch.dataset_created.send(sender=DataResource, instance=ds, user=user)
return json_or_jsonp(request, {'path' : ds.slug }, code=201)
def derive_dataset(request, slug):
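    """Create a new dataset derived from an existing data resource the user may
    view, under the requested (or default) parent page."""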
user = authorize(request)
title = request.REQUEST.get('title', 'Untitled dataset')
parent_dataresource=slug
if 'parent_page' in request.REQUEST:
parent_page = Page.objects.get(slug=request.REQUEST['parent_page'])
authorize(request, parent_page, add=True)
else:
parent_page = get_data_page_for_user(request.user)
parent_dataresource = DataResource.objects.get(slug=parent_dataresource)
authorize(request, parent_dataresource, view=True)
ds = SpatialiteDriver.derive_dataset(
title=title,
parent_page=parent_page,
parent_dataresource=parent_dataresource,
owner=request.user
)
dispatch.api_accessed.send(sender=DataResource, instance=ds, user=user)
dispatch.dataset_created.send(sender=DataResource, instance=ds, user=user)
return json_or_jsonp(request, {'path': ds.slug}, code=201)
def create_dataset_with_parent_geometry(request, slug):
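    """Create a dataset that reuses the parent resource's geometry, either by
    joining an uploaded CSV on the given key columns or by defining new
    columns alongside the inherited geometry."""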
user = authorize(request)
title = request.REQUEST.get('title', 'Untitled dataset')
parent_dataresource = slug
srid = int(request.REQUEST.get('srid', 4326))
geometry_type = request.REQUEST.get('geometry_type', 'GEOMETRY')
columns_definitions = json.loads(request.REQUEST.get('columns_definitions', "{}"))
columns_definitions = ((key, value) for key, value in columns_definitions.items())
parent_key = request.REQUEST.get('parent_key', None)
child_key = request.REQUEST.get('child_key', None)
csv = None
if len(request.FILES.keys()) > 0:
csvfile = NamedTemporaryFile(suffix='csv')
csvfile.write(request.FILES[request.FILES.keys().next()].read())
csvfile.flush()
csv = pandas.DataFrame.from_csv(csvfile.name)
if 'parent_page' in request.REQUEST:
parent_page = Page.objects.get(slug=request.REQUEST['parent_page'])
authorize(request, parent_page, add=True)
else:
parent_page = get_data_page_for_user(request.user)
parent_dataresource = DataResource.objects.get(slug=parent_dataresource)
    authorize(request, parent_dataresource, view=True)
    if csv is not None:
ds = SpatialiteDriver.join_data_with_existing_geometry(
title=title,
parent=parent_page,
new_data=csv,
join_field_in_existing_data=parent_key,
join_field_in_new_data=child_key,
parent_dataresource=parent_dataresource,
srid=srid,
geometry_type=geometry_type,
owner=request.user
)
else:
ds = SpatialiteDriver.create_dataset_with_parent_geometry(
title=title,
parent=parent_page,
parent_dataresource=parent_dataresource,
srid=srid,
columns_definitions=columns_definitions,
geometry_type=geometry_type,
owner=request.user
)
dispatch.api_accessed.send(sender=DataResource, instance=ds, user=user)
    dispatch.dataset_created.send(sender=DataResource, instance=ds, user=user)
return json_or_jsonp(request, {'path': ds.slug}, code=201)
def schema(request, slug=None, *args, **kwargs):
s = get_object_or_404(DataResource, slug=slug)
user = authorize(request, s, view=True)
r = [{'name': n} for n in s.driver_instance.schema()]
dispatch.api_accessed.send(sender=DataResource, instance=s, user=user)
return json_or_jsonp(request, r)
def full_schema(request, slug=None, *args, **kwargs):
s = get_object_or_404(DataResource, slug=slug)
user = authorize(request, s, view=True)
r = [{'name': n, 'kind' : t} for n, t in s.driver_instance.full_schema().items()]
dispatch.api_accessed.send(sender=DataResource, instance=s, user=user)
return json_or_jsonp(request, r)
@csrf_exempt
def add_column(request, slug=None, *args, **kwargs):
field_name = request.REQUEST['name']
field_type = request.REQUEST.get('type', 'text')
ds = get_object_or_404(DataResource, slug=slug)
user = authorize(request, ds, edit=True)
ds.driver_instance.add_column(field_name, field_type)
dispatch.api_accessed.send(sender=DataResource, instance=ds, user=user)
dispatch.dataset_column_added.send(sender=DataResource, instance=ds, user=user)
return HttpResponse(status=201)
@csrf_exempt
def add_row(request, slug=None, *args, **kwargs):
ds = get_object_or_404(DataResource, slug=slug)
user = authorize(request, ds, edit=True)
schema = {k for k in ds.driver_instance.schema()}
row = {k: v for k, v in request.REQUEST.items() if k in schema}
try:
payload = json.loads(request.body)
for k in [x for x in payload if x in schema]:
row[k] = payload[k]
except:
        pass  # the body was not valid JSON; fall back to the request parameters only
new_rec = ds.driver_instance.add_row(**row)
bbox = GEOSGeometry(row['GEOMETRY']).envelope
dispatch.api_accessed.send(sender=DataResource, instance=ds, user=user)
dispatch.features_created.send(sender=DataResource, instance=ds, user=user, count=1, bbox=bbox)
return json_or_jsonp(request, new_rec, code=201)
@csrf_exempt
def update_row(request, slug=None, ogc_fid=None, *args, **kwargs):
ds = get_object_or_404(DataResource, slug=slug)
user = authorize(request, ds, edit=True)
schema = {k for k in ds.driver_instance.schema()}
row = { k : v for k, v in request.REQUEST.items() if k in schema }
try:
payload = json.loads(request.body)
for k in [x for x in payload if x in schema]:
row[k] = payload[k]
except:
        pass  # the body was not valid JSON; fall back to the request parameters only
if ogc_fid is None:
ogc_fid = row['OGC_FID']
bbox = GEOSGeometry(ds.driver_instance.get_row(int(ogc_fid), geometry_format='wkt')['GEOMETRY']).envelope
result = ds.driver_instance.update_row(int(ogc_fid), **row)
dispatch.api_accessed.send(sender=DataResource, instance=ds, user=user)
dispatch.features_updated.send(sender=DataResource, instance=ds, user=user, count=1, fid=ogc_fid, bbox=bbox)
return json_or_jsonp(request, result)
@csrf_exempt
def delete_row(request, slug=None, ogc_fid=None, *args, **kwargs):
    ds = get_object_or_404(DataResource, slug=slug)
    user = authorize(request, ds, edit=True)
    bbox = GEOSGeometry(ds.driver_instance.get_row(int(ogc_fid), geometry_format='wkt')['GEOMETRY']).envelope
    ds.driver_instance.delete_row(int(ogc_fid))
dispatch.api_accessed.send(sender=DataResource, instance=ds, user=user)
dispatch.features_deleted.send(sender=DataResource, instance=ds, user=user, count=1, fid=ogc_fid, bbox=bbox)
return HttpResponse()
def get_row(request, slug=None, ogc_fid=None, *args, **kwargs):
ds = get_object_or_404(DataResource, slug=slug)
ds.driver_instance.ready_data_resource()
user = authorize(request, ds, view=True)
format = request.REQUEST.get('format', 'wkt')
try:
row = ds.driver_instance.get_row(int(ogc_fid), geometry_format=format if format != 'geojsonreal' else 'geojson')
except:
row = None
dispatch.api_accessed.send(sender=DataResource, instance=ds, user=user)
dispatch.features_retrieved.send(sender=DataResource, instance=ds, user=user, count=1, fid=ogc_fid)
return json_or_jsonp(request, geojson_transform(request, row))
def get_rows(request, slug=None, ogc_fid_start=None, ogc_fid_end=None, limit=None, *args, **kwargs):
ds = get_object_or_404(DataResource, slug=slug)
user = authorize(request, ds, view=True)
ds.driver_instance.ready_data_resource()
format = request.REQUEST.get('format', 'wkt')
if ogc_fid_end:
rows = ds.driver_instance.get_rows(int(ogc_fid_start), int(ogc_fid_end), geometry_format=format if format != 'geojsonreal' else 'geojson')
elif limit:
rows = ds.driver_instance.get_rows(int(ogc_fid_start), limit=int(limit), geometry_format=format if format != 'geojsonreal' else 'geojson')
else:
rows = ds.driver_instance.get_rows(int(ogc_fid_start), geometry_format=format if format != 'geojsonreal' else 'geojson')
dispatch.api_accessed.send(sender=DataResource, instance=ds, user=user)
dispatch.features_retrieved.send(sender=DataResource, instance=ds, user=user, count=len(rows))
return json_or_jsonp(request, geojson_transform(request, rows))
def query(request, slug=None, **kwargs):
ds = get_object_or_404(DataResource, slug=slug)
user = authorize(request, ds, view=True)
ds.driver_instance.ready_data_resource()
maybeint = lambda x: int(x) if x else None
geometry_mbr = [float(kwargs[k]) for k in ['x1', 'y1', 'x2', 'y2']] if 'x1' in kwargs else None
srid = kwargs['srid'] if 'srid' in kwargs else None
geometry = request.REQUEST.get('g', None)
geometry_format = request.REQUEST.get('format', 'geojson')
geometry_operator = request.REQUEST.get('op', 'intersects')
limit = maybeint(request.REQUEST.get('limit', None))
start = maybeint(request.REQUEST.get('start', None))
end = maybeint(request.REQUEST.get('end', None))
only = request.REQUEST.get('only', None)
if only:
only = only.split(',')
rest = {k: v for k, v in request.REQUEST.items() if
k not in {'limit', 'start', 'end', 'only', 'g', 'op', 'format', 'api_key','callback','jsonp', '_'}}
rows = ds.driver_instance.query(
query_mbr=geometry_mbr,
query_geometry=geometry,
geometry_format=geometry_format if geometry_format != 'geojsonreal' else 'geojson',
geometry_operator=geometry_operator,
query_geometry_srid=srid,
limit=limit,
start=start,
end=end,
only=only,
**rest
)
dispatch.api_accessed.send(sender=DataResource, instance=ds, user=user)
dispatch.features_retrieved.send(sender=DataResource, instance=ds, user=user, count=len(rows))
return json_or_jsonp(request, geojson_transform(request, rows))
class CRUDView(View):
@method_decorator(csrf_exempt)
def dispatch(self, *args, **kwargs):
return super(CRUDView, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
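        # Route GET requests: a single feature when ogc_fid is given, a slice when
        # ogc_fid_start (and optionally ogc_fid_end/limit) is given, otherwise a query.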
if 'ogc_fid' in kwargs:
return get_row(request, slug=kwargs['slug'], ogc_fid=kwargs['ogc_fid'])
elif 'ogc_fid_start' in kwargs:
return get_rows(
request,
slug=kwargs['slug'],
ogc_fid_start=kwargs['ogc_fid_start'],
ogc_fid_end=kwargs.get('ogc_fid_end', None),
limit=kwargs.get('limit', None),
)
else:
return query(
request,
slug=kwargs['slug'],
**request.GET
)
def post(self, request, *args, **kwargs):
return add_row(request, kwargs['slug'])
def put(self, request, *args, **kwargs):
return update_row(request, kwargs['slug'], kwargs.get('ogc_fid', None))
def delete(self, request, *args, **kwargs):
return delete_row(request, kwargs['slug'], kwargs.get('ogc_fid', None))
| apache-2.0 |
mitenjain/signalAlign | scripts/variantCallingLib.py | 2 | 11885 | #!/usr/bin/env python
"""Library for calling variants
"""
from __future__ import print_function
import sys
import os
import glob
import pandas as pd
import numpy as np
from random import shuffle
from signalAlignLib import SignalAlignment
from alignmentAnalysisLib import CallMethylation
from multiprocessing import Process, Queue, current_process, Manager
from serviceCourse.parsers import read_fasta
from serviceCourse.sequenceTools import reverse_complement
def randomly_select_alignments(path_to_alignments, max_alignments_to_use):
alignments = [x for x in glob.glob(path_to_alignments) if os.stat(x).st_size != 0]
if len(alignments) == 0:
print("[error] Didn't find any alignment files here {}".format(path_to_alignments))
sys.exit(1)
shuffle(alignments)
if len(alignments) < max_alignments_to_use:
return alignments
else:
return alignments[:max_alignments_to_use]
def get_forward_mask(list_of_alignments, suffix):
mask = []
for alignment in list_of_alignments:
if alignment.endswith(".backward.tsv{}".format(suffix)):
mask.append(False)
else:
mask.append(True)
return mask
def get_alignments_labels_and_mask(path_to_alignments, max, suffix=""):
alignments = randomly_select_alignments(path_to_alignments, max)
mask = get_forward_mask(alignments, suffix)
return alignments, mask
def get_reference_sequence(path_to_fasta):
seqs = []
for header, comment, sequence in read_fasta(path_to_fasta):
seqs.append(sequence)
assert len(seqs) > 0, "Didn't find any sequences in the reference file"
if len(seqs) > 1:
print("[NOTICE] Found more than one sequence in the reference file, using the first one")
return seqs[0]
def make_degenerate_reference(input_sequence, positions, forward_sequence_path, backward_sequence_path,
block_size=1):
"""
input_sequence: string, input nucleotide sequence
out_path: string, path to directory to put new sequences with substituted degenerate characters
block_size: not implemented, will be the size of the Ns to add (eg. NN = block_size 2)
:return (subbed sequence, complement subbed sequence)
"""
complement_sequence = reverse_complement(dna=input_sequence, reverse=False, complement=True)
if positions is not None:
t_seq = list(input_sequence)
c_seq = list(complement_sequence)
for position in positions:
t_seq[position] = "X"
c_seq[position] = "X"
t_seq = ''.join(t_seq)
c_seq = ''.join(c_seq)
else:
t_seq = input_sequence
c_seq = complement_sequence
with open(forward_sequence_path, 'w') as f:
f.write("{seq}".format(seq=t_seq))
with open(backward_sequence_path, 'w') as f:
f.write("{seq}".format(seq=c_seq))
return True
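# A minimal usage sketch (hypothetical inputs): masking positions 2 and 5 of
# "ACGTACGT" writes "ACXTAXGT" to the forward file and its (un-reversed)
# complement, masked at the same positions, to the backward file:
#   make_degenerate_reference("ACGTACGT", [2, 5],
#                             "forward_reference.txt", "backward_reference.txt")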
def load_variant_call_data(file_path):
data = pd.read_table(file_path,
usecols=(0, 1, 2, 3, 4, 5, 6),
names=['site', 'strand', 'pA', 'pC', 'pG', 'pT', 'read'],
dtype={'site': np.int64,
'strand': np.str,
                                'pA': np.float64,
                                'pC': np.float64,
                                'pG': np.float64,
                                'pT': np.float64,
'read': np.str,
})
return data
def symbol_to_base(symbol):
return ["A", "C", "G", "T"][symbol]
def rc_probs(probs):
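    # Reverse-complement a [pA, pC, pG, pT] probability vector by swapping the
    # A/T and C/G entries.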
return [probs[3], probs[2], probs[1], probs[0]]
def call_sites_with_marginal_probs(data, reference_sequence_string, min_depth=0, get_sites=False):
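    # For every reference site, sum the per-base marginal probabilities across reads
    # (reverse-complementing backward-strand contributions), normalize, and take the
    # argmax as the called base. With get_sites=True, return (site, delta_prob)
    # candidates where the call differs from the reference; otherwise return the
    # reference sequence with those sites edited in place.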
d = load_variant_call_data(data)
reference_sequence_list = list(reference_sequence_string)
candidate_sites = []
add_to_candidates = candidate_sites.append
for g, x in d.groupby("site"):
marginal_forward_p = pd.Series(0, ['pA', 'pC', 'pG', 'pT'])
marginal_backward_p = pd.Series(0, ['pA', 'pC', 'pG', 'pT'])
assert(len(x['site'].unique()) == 1)
site = x['site'].unique()[0]
if len(x['read']) < min_depth:
continue
for i, read in x.iterrows():
if ((read['read'].endswith(".forward.tsv") and read['strand'] == 't') or
(read['read'].endswith(".backward.tsv") and read['strand'] == 'c')):
direction = True
else:
direction = False
if direction:
marginal_forward_p += read[['pA', 'pC', 'pG', 'pT']]
else:
marginal_backward_p += read[['pA', 'pC', 'pG', 'pT']]
marginal_prob = marginal_forward_p + rc_probs(marginal_backward_p)
normed_marginal_probs = marginal_prob.map(lambda y: y / sum(marginal_prob))
called_base = normed_marginal_probs.argmax()[1]
if called_base != reference_sequence_list[site]:
if get_sites is False:
print("Changing {orig} to {new} at {site} depth {depth}"
"".format(orig=reference_sequence_list[site], new=called_base, site=site, depth=len(x['read'])))
reference_sequence_list[site] = called_base
else:
print("Proposing edit at {site} from {orig} to {new}, \n{probs}"
"".format(orig=reference_sequence_list[site], new=called_base, site=site,
probs=normed_marginal_probs))
difference = normed_marginal_probs.max() - normed_marginal_probs["p" + reference_sequence_list[site]]
print(difference)
add_to_candidates((site, difference))
if get_sites is True:
return candidate_sites
else:
return ''.join(reference_sequence_list)
def aligner(work_queue, done_queue):
try:
for f in iter(work_queue.get, 'STOP'):
alignment = SignalAlignment(**f)
alignment.run()
except Exception, e:
done_queue.put("%s failed with %s" % (current_process().name, e.message))
def variant_caller(work_queue, done_queue):
try:
for f in iter(work_queue.get, 'STOP'):
c = CallMethylation(**f)
c.write()
except Exception, e:
done_queue.put("%s failed with %s" % (current_process().name, e.message))
def run_service(service, service_iterable, service_arguments, workers, iterable_argument):
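    # Fan service_arguments out over a multiprocessing work queue: each element of
    # service_iterable is bound to iterable_argument, merged into the shared kwargs,
    # and consumed by `workers` processes running `service` until the queue drains.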
# setup workers for multiprocessing
work_queue = Manager().Queue()
done_queue = Manager().Queue()
jobs = []
for x in service_iterable:
args = dict({iterable_argument: x},
**service_arguments)
work_queue.put(args)
for w in xrange(workers):
p = Process(target=service, args=(work_queue, done_queue))
p.start()
jobs.append(p)
    # one sentinel per worker so that every process sees 'STOP' and exits
    for w in xrange(workers):
        work_queue.put('STOP')
for p in jobs:
p.join()
done_queue.put('STOP')
def make_reference_files_and_alignment_args(working_folder, reference_sequence_string, alignment_args,
n_positions=None):
# make paths for working txt files that contain this STEPs Ns
forward_reference = working_folder.add_file_path("forward_reference.txt")
backward_reference = working_folder.add_file_path("backward_reference.txt")
# make N-ed reference sequence for this iteration, writes the strings to files
check = make_degenerate_reference(reference_sequence_string, n_positions,
forward_reference, backward_reference)
assert check, "Problem making degenerate reference"
# perform alignment for this step
alignment_args["forward_reference"] = forward_reference
alignment_args["backward_reference"] = backward_reference
return True
def scan_for_proposals(working_folder, step, reference_sequence_string, list_of_fast5s, alignment_args, workers):
reference_sequence_length = len(reference_sequence_string)
assert reference_sequence_length > 0, "Got empty string for reference sequence."
# proposals will contain the sites that we're going to change to N
proposals = []
for s in xrange(step):
scan_positions = range(s, reference_sequence_length, step)
check = make_reference_files_and_alignment_args(working_folder, reference_sequence_string,
alignment_args, n_positions=scan_positions)
assert check, "Problem making degenerate reference for step {step}".format(step=s)
run_service(aligner, list_of_fast5s, alignment_args, workers, "in_fast5")
# alignments is the list of alignments to gather proposals from
alignments = [x for x in glob.glob(working_folder.path + "*.tsv") if os.stat(x).st_size != 0]
if len(alignments) == 0:
print("[error] Didn't find any alignment files here {}".format(working_folder.path))
sys.exit(1)
marginal_probability_file = working_folder.add_file_path("marginals.{step}.calls".format(step=s))
proposal_args = {
"sequence": None,
"out_file": marginal_probability_file,
"positions": {"forward": scan_positions, "backward": scan_positions},
"degenerate_type": alignment_args["degenerate"]
}
#for alignment in alignments:
# a = dict({"alignment_file": alignment}, **proposal_args)
# c = CallMethylation(**a)
# c.write()
run_service(variant_caller, alignments, proposal_args, workers, "alignment_file")
# get proposal sites
proposals += call_sites_with_marginal_probs(marginal_probability_file, reference_sequence_string,
min_depth=0, get_sites=True)
# remove old alignments
for f in glob.glob(working_folder.path + "*.tsv"):
os.remove(f)
# proposals is a list of lists containing (position, delta_prob) where position in the position in the
# reference sequence that is being proposed to be edited, and delta_prob is the difference in probability
# of the reference base to the proposed base
return proposals
def update_reference_with_marginal_probs(working_folder, proposals, reference_sequence_string, list_of_fast5s,
alignment_args, workers):
check = make_reference_files_and_alignment_args(working_folder, reference_sequence_string, alignment_args,
n_positions=proposals)
assert check, "[update_reference_with_marginal_probs]: problem making reference files and args dict"
run_service(aligner, list_of_fast5s, alignment_args, workers, "in_fast5")
alignments = [x for x in glob.glob(working_folder.path + "*.tsv") if os.stat(x).st_size != 0]
marginal_probability_file = working_folder.add_file_path("proposals.calls")
proposal_args = {
"sequence": None,
"out_file": marginal_probability_file,
"positions": {"forward": proposals, "backward": proposals},
"degenerate_type": alignment_args["degenerate"]
}
#for alignment in alignments:
# a = dict({"alignment_file": alignment}, **proposal_args)
# c = CallMethylation(**a)
# c.write()
run_service(variant_caller, alignments, proposal_args, workers, "alignment_file")
    # call the candidate sites and return the updated reference sequence
    updated_reference_sequence = call_sites_with_marginal_probs(marginal_probability_file, reference_sequence_string,
                                                                min_depth=0, get_sites=False)
# clean up
working_folder.remove_file(marginal_probability_file)
# remove old alignments
for f in glob.glob(working_folder.path + "*.tsv"):
os.remove(f)
return updated_reference_sequence
| mit |
ocefpaf/system-test | Theme_2_Extreme_Events/Scenario_2A/Extremes_Currents/Extreme_Currents.py | 3 | 17130 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import time
start_runtime = time.time()
# <markdowncell>
# ># IOOS System Test: [Extreme Events Theme:](https://github.com/ioos/system-test/wiki/Development-of-Test-Themes#theme-2-extreme-events) Coastal Inundation
# <markdowncell>
# ### Can we obtain observed current data at stations located within a bounding box?
# This notebook is based on IOOS System Test: Inundation
# <markdowncell>
# Methodology:
# * Define temporal and spatial bounds of interest, as well as
# parameters of interest
# * Search for available service endpoints in the NGDC CSW catalog
# meeting search criteria
# * Search for available OPeNDAP data endpoints
# * Obtain observation data sets from stations within the spatial
# boundaries (from CO-OPS and NDBC)
# * Extract time series for identified stations
# * Plot time series data, current rose, annual max values per station
# * Plot observation stations on a map
# <markdowncell>
# #### import required libraries
# <codecell>
import os
import os.path
from datetime import datetime, timedelta
import uuid
import folium
import matplotlib.pyplot as plt
from owslib.csw import CatalogueServiceWeb
from owslib import fes
import numpy as np
from pandas import read_csv
from pyoos.collectors.ndbc.ndbc_sos import NdbcSos
from pyoos.collectors.coops.coops_sos import CoopsSos
from utilities import (fes_date_filter, service_urls, get_coordinates,
inline_map, css_styles, processStationInfo,
get_ncfiles_catalog, new_axes, set_legend)
css_styles()
# <markdowncell>
# <div class="warning"><strong>Temporal Bounds</strong> -
# Anything longer than one year kills the CO-OPS service</div>
# <codecell>
bounding_box_type = "box"
# Bounding Box [lon_min, lat_min, lon_max, lat_max]
area = {'Hawaii': [-160.0, 18.0, -154., 23.0],
'Gulf of Maine': [-72.0, 41.0, -69.0, 43.0],
'New York harbor region': [-75., 39., -71., 41.5],
'Puerto Rico': [-71, 14, -60, 24],
'East Coast': [-77, 34, -70, 40],
'North West': [-130, 38, -121, 50]}
bounding_box = area['North West']
# Temporal range.
jd_now = datetime.utcnow()
jd_start, jd_stop = jd_now - timedelta(days=(365*10)), jd_now
start_date = jd_start.strftime('%Y-%m-%d %H:00')
stop_date = jd_stop.strftime('%Y-%m-%d %H:00')
jd_start = datetime.strptime(start_date, '%Y-%m-%d %H:%M')
jd_stop = datetime.strptime(stop_date, '%Y-%m-%d %H:%M')
print('%s to %s ' % (start_date, stop_date))
# <codecell>
# Put the names in a dict for ease of access.
data_dict = {}
sos_name = 'Currents'
data_dict['currents'] = {"names": ['currents',
'surface_eastward_sea_water_velocity',
'*surface_eastward_sea_water_velocity*'],
"sos_name": ['currents']}
# <markdowncell>
# CSW Search
# <codecell>
endpoint = 'http://www.ngdc.noaa.gov/geoportal/csw' # NGDC Geoportal.
csw = CatalogueServiceWeb(endpoint, timeout=60)
# <markdowncell>
# Search
# <codecell>
# Convert User Input into FES filters.
start, stop = fes_date_filter(start_date, stop_date)
bbox = fes.BBox(bounding_box)
# Use the search name to create search filter.
kw = dict(propertyname='apiso:AnyText', escapeChar='\\',
wildCard='*', singleChar='?')
or_filt = fes.Or([fes.PropertyIsLike(literal=('*%s*' % val), **kw) for
val in data_dict['currents']['names']])
val = 'Averages'
not_filt = fes.Not([fes.PropertyIsLike(literal=('*%s*' % val), **kw)])
filter_list = [fes.And([bbox, start, stop, or_filt, not_filt])]
# Connect to CSW, explore it's properties
# try request using multiple filters "and" syntax: [[filter1, filter2]]
csw.getrecords2(constraints=filter_list, maxrecords=1000, esn='full')
print("%s csw records found" % len(csw.records))
for rec, item in csw.records.items():
print(item.title)
# <markdowncell>
# DAP
# <codecell>
dap_urls = service_urls(csw.records)
# Remove duplicates and organize.
dap_urls = sorted(set(dap_urls))
print("Total DAP: %s" % len(dap_urls))
# Print them all:
print("\n".join(dap_urls))
# <markdowncell>
# Get SOS links, NDBC is not available so add it...
# <codecell>
sos_urls = service_urls(csw.records, service='sos:url')
# Remove duplicates and organize.
sos_urls = sorted(set(sos_urls))
print("Total SOS: %s" % len(sos_urls))
print("\n".join(sos_urls))
# <markdowncell>
# #### Update SOS time-date
# <codecell>
start_time = datetime.strptime(start_date, '%Y-%m-%d %H:%M')
end_time = datetime.strptime(stop_date, '%Y-%m-%d %H:%M')
iso_start = start_time.strftime('%Y-%m-%dT%H:%M:%SZ')
iso_end = end_time.strftime('%Y-%m-%dT%H:%M:%SZ')
# <markdowncell>
# <div class="success"><strong>Get list of stations</strong>
# - we get a list of the available stations from NOAA and COOPS</div>
# <markdowncell>
# #### Initialize Station Data List
# <codecell>
st_list = {}
# <markdowncell>
# #### Get CO-OPS Station Data
# <codecell>
coops_collector = CoopsSos()
coops_collector.start_time = start_time
coops_collector.end_time = end_time
coops_collector.variables = data_dict["currents"]["sos_name"]
coops_collector.server.identification.title
ofrs = coops_collector.server.offerings
print("%s:%s" % (coops_collector.start_time, coops_collector.end_time))
print(len(ofrs))
# <markdowncell>
# #### gets a list of the active stations from coops
# <codecell>
box_str = ','.join(str(e) for e in bounding_box)
url = (('http://opendap.co-ops.nos.noaa.gov/ioos-dif-sos/SOS?'
'service=SOS&request=GetObservation&version=1.0.0&'
'observedProperty=%s&bin=1&'
'offering=urn:ioos:network:NOAA.NOS.CO-OPS:CurrentsActive&'
'featureOfInterest=BBOX:%s&responseFormat=text/csv') %
(sos_name, box_str))
obs_loc_df = read_csv(url)
print(url)
print("Date: %s to %s" % (iso_start, iso_end))
print("Lat/Lon Box: %s" % box_str)
# <markdowncell>
# #### COOPS Station Information
# <codecell>
st_list = processStationInfo(obs_loc_df, st_list, "coops")
# <codecell>
st_list
# <markdowncell>
# #### Get NDBC Station Data
# <codecell>
ndbc_collector = NdbcSos()
ndbc_collector.start_time = start_time
ndbc_collector.end_time = end_time
ndbc_collector.variables = data_dict["currents"]["sos_name"]
ndbc_collector.server.identification.title
print("%s:%s" % (ndbc_collector.start_time, ndbc_collector.end_time))
ofrs = ndbc_collector.server.offerings
print(len(ofrs))
# <codecell>
print("Date: %s to %s" % (iso_start, iso_end))
box_str = ','.join(str(e) for e in bounding_box)
print("Lat/Lon Box: %s" % box_str)
url = (('http://sdf.ndbc.noaa.gov/sos/server.php?'
'request=GetObservation&service=SOS&'
'version=1.0.0&'
'offering=urn:ioos:network:noaa.nws.ndbc:all&'
'featureofinterest=BBOX:%s&'
'observedproperty=%s&'
'responseformat=text/csv&') % (box_str, sos_name))
print(url)
obs_loc_df = read_csv(url)
# <markdowncell>
# #### NDBC Station information
# <codecell>
st_list = processStationInfo(obs_loc_df, st_list, "ndbc")
st_list
# <codecell>
print(st_list[st_list.keys()[0]]['lat'])
print(st_list[st_list.keys()[0]]['lon'])
# <markdowncell>
# #### The function only supports whole date-time differences
# <markdowncell>
# <div class="error">
# <strong>Large Temporal Requests Need To Be Broken Down</strong> -
# When requesting a large temporal range outside the SOS limit, the sos
# request needs to be broken down. See issues in
# [ioos](https://github.com/ioos/system-test/issues/81),
# [ioos](https://github.com/ioos/system-test/issues/101),
# [ioos](https://github.com/ioos/system-test/issues/116)
# and
# [pyoos](https://github.com/ioos/pyoos/issues/35). Unfortunately currents
# is not available via DAP
# ([ioos](https://github.com/ioos/system-test/issues/116))</div>
# <markdowncell>
# <div class="error">
# <strong>Large Temporal Requests Need To Be Broken Down</strong> -
# Obtaining long time series from COOPS via SOS is not ideal and the opendap
# links are not available, so we use the tides and currents api to get the
# currents in json format. The api response provides in default bin, unless a
# bin is specified (i.e bin=1)</div>
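# <markdowncell>
# The sketch below shows one way such a request can be broken up: split the
# overall temporal range into windows no longer than the per-request limit
# (30 days is assumed here, matching `coops_point_max_days` further down) and
# issue one request per window. The `chunk_time_range` helper is illustrative
# only; it is not part of the `utilities` module.
# <codecell>
def chunk_time_range(start, end, max_days=30):
    """Yield (window_start, window_end) pairs covering [start, end] in
    windows of at most max_days days."""
    step = timedelta(days=max_days)
    window_start = start
    while window_start < end:
        window_end = min(window_start + step, end)
        yield window_start, window_end
        window_start = window_end
# Example (one request per window):
# for w_start, w_end in chunk_time_range(jd_start, jd_stop):
#     collector.start_time, collector.end_time = w_start, w_end
#     ... issue the SOS request for this window ...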
# <markdowncell>
# <div class="warning"><strong>Pyoos</strong> -
# We should be able to use the pyoos collector here, but it does not work for this request.</div>
# <markdowncell>
# <div class="info">
# <strong>Use NDBC DAP endpoints to get time-series data</strong> -
# The DAP server for currents is available for NDBC data, we use that
# to get long time series data.</div>
# <markdowncell>
# <div class="info"><strong>Progress Information For Large Requests</strong> -
# Shows the user a progress bar for each stations as its processed. Click
# [here]('http://www.tidesandcurrents.noaa.gov/cdata/StationList?type=Current+Data&filter=active')
# to show more information on the CO-OPS locations</div>
# <markdowncell>
# <div class="error"><strong>Processing long time series</strong> -
# The CO-OPS Server responds really slow (> 30 secs, for what should be
# a 5 sec request) to multiple requests, so getting long time series
# data is almost impossible.</div>
# <markdowncell>
# #### get CO-OPS station data
# <codecell>
# Used to define the number of days allowable by the service.
coops_point_max_days = ndbc_point_max_days = 30
print("start & end dates: %s, %s\n" % (jd_start, jd_stop))
for station_index in st_list.keys():
# Set it so we can use it later.
st = station_index.split(":")[-1]
print('[%s]: %s' % (st_list[station_index]['source'], station_index))
divid = str(uuid.uuid4())
if st_list[station_index]['source'] == 'coops':
# Coops fails for large requests.
master_df = []
elif st_list[station_index]['source'] == 'ndbc':
# Use the dap catalog to get the data.
master_df = get_ncfiles_catalog(station_index, jd_start, jd_stop)
if len(master_df) > 0:
st_list[station_index]['hasObsData'] = True
st_list[station_index]['obsData'] = master_df
# <codecell>
# Check theres data in there.
st_list[st_list.keys()[2]]
# <markdowncell>
# ### Plot the pandas data frames for the stations
# <markdowncell>
# <div class="error"><strong>Station Data Plot</strong> -
# There might be an issue with some of the NDBC station data...</div>
# <codecell>
for station_index in st_list.keys():
df = st_list[station_index]['obsData']
if len(df) > 1:
st_list[station_index]['hasObsData'] = True
print("num rows: %s" % len(df))
fig = plt.figure(figsize=(18, 3))
plt.scatter(df.index, df['sea_water_speed (cm/s)'])
fig.suptitle('Station:'+station_index, fontsize=20)
plt.xlabel('Date', fontsize=18)
plt.ylabel('sea_water_speed (cm/s)', fontsize=16)
else:
st_list[station_index]['hasObsData'] = False
# <markdowncell>
# #### Find the min and max data values
# <markdowncell>
# <div class="warning"><strong>Station Data Plot</strong> -
# Some stations might not plot due to the data.</div>
# <codecell>
# Build current roses.
filelist = [f for f in os.listdir("./images") if f.endswith(".png")]
for f in filelist:
os.remove("./images/{}".format(f))
station_min_max = {}
for station_index in st_list.keys():
all_spd_data = {}
all_dir_data = {}
all_time_spd = []
all_time_dir = []
df = st_list[station_index]['obsData']
if len(df) > 1:
try:
spd_data = df['sea_water_speed (cm/s)'].values
spd_data = np.array(spd_data)
dir_data = df['direction_of_sea_water_velocity (degree)'].values
dir_data = np.array(dir_data)
time_data = df.index.tolist()
time_data = np.array(time_data)
            # NOTE: This data cleanup could be done with a vectorized function.
for idx in range(0, len(spd_data)):
if spd_data[idx] > 998:
continue
elif np.isnan(spd_data[idx]):
continue
elif dir_data[idx] == 0:
continue
else:
dt_year = time_data[idx].year
dt_year = str(dt_year)
if dt_year not in all_spd_data.keys():
all_spd_data[dt_year] = []
all_dir_data[dt_year] = []
# Convert to knots.
knot_val = (spd_data[idx] * 0.0194384449)
knot_val = "%.4f" % knot_val
knot_val = float(knot_val)
all_spd_data[dt_year].append(knot_val)
all_dir_data[dt_year].append(dir_data[idx])
all_time_spd.append(knot_val)
all_time_dir.append(dir_data[idx])
all_time_spd = np.array(all_time_spd, dtype=np.float)
all_time_dir = np.array(all_time_dir, dtype=np.float)
station_min_max[station_index] = {}
for year in all_spd_data.keys():
year_spd = np.array(all_spd_data[year])
year_dir = np.array(all_dir_data[year])
station_min_max[station_index][year] = {}
station_min_max[station_index][year]['pts'] = len(year_spd)
min_spd, max_spd = np.min(year_spd), np.max(year_spd)
station_min_max[station_index][year]['spd_min'] = min_spd
station_min_max[station_index][year]['spd_max'] = max_spd
dir_min, dir_max = np.argmin(year_spd), np.argmax(year_spd)
yr_dir_min, yr_dir_max = year_dir[dir_min], year_dir[dir_max]
station_min_max[station_index][year]['dir_at_min'] = yr_dir_min
station_min_max[station_index][year]['dir_at_max'] = yr_dir_max
try:
# A stacked histogram with normed
# (displayed in percent) results.
ax = new_axes()
ax.set_title(station_index.split(":")[-1] +
" stacked histogram with normed (displayed in %)"
"\nresults (spd in knots), All Time.")
ax.bar(all_time_dir, all_time_spd, normed=True,
opening=0.8, edgecolor='white')
set_legend(ax)
fig = plt.gcf()
fig.set_size_inches(8, 8)
fname = './images/%s.png' % station_index.split(":")[-1]
fig.savefig(fname, dpi=100)
except Exception as e:
print("Error when plotting %s" % e)
pass
except Exception as e: # Be specific here!
print("Error: %s" % e)
pass
# <codecell>
# Plot the min and max from each station.
fields = ['spd_']
for idx in range(0, len(fields)):
d_field = fields[idx]
fig, ax = plt.subplots(1, 1, figsize=(18, 5))
for st in station_min_max:
x, y_min, y_max = [], [], []
for year in station_min_max[st]:
x.append(year)
y_max.append(station_min_max[st][year][d_field+'max'])
marker_size = station_min_max[st][year]['pts'] / 80
marker_size += 20
station_label = st.split(":")[-1]
ax.scatter(np.array(x), np.array(y_max),
label=station_label, s=marker_size,
c=np.random.rand(3, 1), marker="o")
ax.set_xlim([2000, 2015])
ax.set_title("Yearly Max Speed Per Station, Marker Scaled Per "
"Annual Pts (bigger = more pts per year)")
ax.set_ylabel("speed (knots)")
ax.set_xlabel("Year")
ax.legend(loc='upper left')
# <markdowncell>
# #### Produce Interactive Map
# <codecell>
station = st_list[st_list.keys()[0]]
m = folium.Map(location=[station["lat"], station["lon"]], zoom_start=4)
m.line(get_coordinates(bounding_box, bounding_box_type),
line_color='#FF0000', line_weight=5)
# Plot the obs station.
for st in st_list:
hasObs = st_list[st]['hasObsData']
if hasObs:
fname = './images/%s.png' % st.split(":")[-1]
if os.path.isfile(fname):
popup = ('Obs Location:<br>%s<br><img border=120 src="'
'./images/%s.png" width="242" height="242">' %
(st, st.split(":")[-1]))
m.simple_marker([st_list[st]["lat"], st_list[st]["lon"]],
popup=popup,
marker_color="green",
marker_icon="ok")
else:
popup = 'Obs Location:<br>%s' % st
m.simple_marker([st_list[st]["lat"], st_list[st]["lon"]],
popup=popup,
marker_color="green",
marker_icon="ok")
else:
popup = 'Obs Location:<br>%s' % st
m.simple_marker([st_list[st]["lat"], st_list[st]["lon"]],
popup=popup,
marker_color="red",
marker_icon="remove")
inline_map(m)
# <codecell>
elapsed = time.time() - start_runtime
print('{:.2f} minutes'.format(elapsed / 60.))
| unlicense |
nan86150/ImageFusion | lib/python2.7/site-packages/matplotlib/backends/backend_macosx.py | 11 | 15754 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import numpy
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib.cbook import maxdict
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.mathtext import MathTextParser
from matplotlib.colors import colorConverter
from matplotlib import rcParams
from matplotlib.widgets import SubplotTool
import matplotlib
from matplotlib.backends import _macosx
class Show(ShowBase):
def mainloop(self):
_macosx.show()
show = Show()
class RendererMac(RendererBase):
"""
The renderer handles drawing/rendering operations. Most of the renderer's
methods forward the command to the renderer's graphics context. The
renderer does not wrap a C object and is written in pure Python.
"""
texd = maxdict(50) # a cache of tex image rasters
def __init__(self, dpi, width, height):
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
self.gc = GraphicsContextMac()
self.gc.set_dpi(self.dpi)
self.mathtext_parser = MathTextParser('MacOSX')
def set_width_height (self, width, height):
self.width, self.height = width, height
def draw_path(self, gc, path, transform, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
linewidth = gc.get_linewidth()
gc.draw_path(path, transform, linewidth, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
linewidth = gc.get_linewidth()
gc.draw_markers(marker_path, marker_trans, path, trans, linewidth, rgbFace)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
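        # Flatten the raw (path, transform) pairs and pass plain matrices to the
        # C-level graphics context, which performs the actual drawing.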
if offset_position=='data':
offset_position = True
else:
offset_position = False
path_ids = []
for path, transform in self._iter_collection_raw_paths(
master_transform, paths, all_transforms):
path_ids.append((path, transform))
master_transform = master_transform.get_matrix()
offsetTrans = offsetTrans.get_matrix()
gc.draw_path_collection(master_transform, path_ids, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds,
offset_position)
def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
coordinates, offsets, offsetTrans, facecolors,
antialiased, edgecolors):
gc.draw_quad_mesh(master_transform.get_matrix(),
meshWidth,
meshHeight,
coordinates,
offsets,
offsetTrans.get_matrix(),
facecolors,
antialiased,
edgecolors)
def new_gc(self):
self.gc.save()
self.gc.set_hatch(None)
self.gc._alpha = 1.0
self.gc._forced_alpha = False # if True, _alpha overrides A from RGBA
return self.gc
def draw_gouraud_triangle(self, gc, points, colors, transform):
points = transform.transform(points)
gc.draw_gouraud_triangle(points, colors)
def get_image_magnification(self):
return self.gc.get_image_magnification()
def draw_image(self, gc, x, y, im):
im.flipud_out()
nrows, ncols, data = im.as_rgba_str()
gc.draw_image(x, y, nrows, ncols, data)
im.flipud_out()
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
# todo, handle props, angle, origins
scale = self.gc.get_image_magnification()
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
key = s, size, self.dpi, angle, texmanager.get_font_config()
im = self.texd.get(key) # Not sure what this does; just copied from backend_agg.py
if im is None:
Z = texmanager.get_grey(s, size, self.dpi*scale)
Z = numpy.array(255.0 - Z * 255.0, numpy.uint8)
gc.draw_mathtext(x, y, angle, Z)
def _draw_mathtext(self, gc, x, y, s, prop, angle):
scale = self.gc.get_image_magnification()
ox, oy, width, height, descent, image, used_characters = \
self.mathtext_parser.parse(s, self.dpi*scale, prop)
gc.draw_mathtext(x, y, angle, 255 - image.as_array())
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
family = prop.get_family()
weight = prop.get_weight()
style = prop.get_style()
points = prop.get_size_in_points()
size = self.points_to_pixels(points)
gc.draw_text(x, y, six.text_type(s), family, size, weight, style, angle)
def get_text_width_height_descent(self, s, prop, ismath):
if ismath=='TeX':
# todo: handle props
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
family = prop.get_family()
weight = prop.get_weight()
style = prop.get_style()
points = prop.get_size_in_points()
size = self.points_to_pixels(points)
width, height, descent = self.gc.get_text_width_height_descent(
six.text_type(s), family, size, weight, style)
return width, height, 0.0*descent
def flipy(self):
return False
def points_to_pixels(self, points):
return points/72.0 * self.dpi
def option_image_nocomposite(self):
return True
class GraphicsContextMac(_macosx.GraphicsContext, GraphicsContextBase):
"""
The GraphicsContext wraps a Quartz graphics context. All methods
are implemented at the C-level in macosx.GraphicsContext. These
methods set drawing properties such as the line style, fill color,
etc. The actual drawing is done by the Renderer, which draws into
the GraphicsContext.
"""
def __init__(self):
GraphicsContextBase.__init__(self)
_macosx.GraphicsContext.__init__(self)
def set_alpha(self, alpha):
GraphicsContextBase.set_alpha(self, alpha)
_alpha = self.get_alpha()
_macosx.GraphicsContext.set_alpha(self, _alpha, self.get_forced_alpha())
rgb = self.get_rgb()
_macosx.GraphicsContext.set_foreground(self, rgb)
def set_foreground(self, fg, isRGBA=False):
GraphicsContextBase.set_foreground(self, fg, isRGBA)
rgb = self.get_rgb()
_macosx.GraphicsContext.set_foreground(self, rgb)
def set_graylevel(self, fg):
GraphicsContextBase.set_graylevel(self, fg)
_macosx.GraphicsContext.set_graylevel(self, fg)
def set_clip_rectangle(self, box):
GraphicsContextBase.set_clip_rectangle(self, box)
if not box: return
_macosx.GraphicsContext.set_clip_rectangle(self, box.bounds)
def set_clip_path(self, path):
GraphicsContextBase.set_clip_path(self, path)
if not path: return
path = path.get_fully_transformed_path()
_macosx.GraphicsContext.set_clip_path(self, path)
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For performance reasons, we don't want to redraw the figure after
each draw command. Instead, we mark the figure as invalid, so that
it will be redrawn as soon as the event loop resumes via PyOS_InputHook.
This function should be called after each draw event, even if
matplotlib is not running interactively.
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.invalidate()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, figure)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasMac(figure)
manager = FigureManagerMac(canvas, num)
return manager
class TimerMac(_macosx.Timer, TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses CoreFoundation
run loops for timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
# completely implemented at the C-level (in _macosx.Timer)
class FigureCanvasMac(_macosx.FigureCanvas, FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
Events such as button presses, mouse movements, and key presses
are handled in the C code and the base class methods
button_press_event, button_release_event, motion_notify_event,
key_press_event, and key_release_event are called from there.
"""
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['bmp'] = 'Windows bitmap'
filetypes['jpeg'] = 'JPEG'
filetypes['jpg'] = 'JPEG'
filetypes['gif'] = 'Graphics Interchange Format'
filetypes['tif'] = 'Tagged Image Format File'
filetypes['tiff'] = 'Tagged Image Format File'
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
width, height = self.get_width_height()
self.renderer = RendererMac(figure.dpi, width, height)
_macosx.FigureCanvas.__init__(self, width, height)
def resize(self, width, height):
self.renderer.set_width_height(width, height)
dpi = self.figure.dpi
width /= dpi
height /= dpi
self.figure.set_size_inches(width, height)
def _print_bitmap(self, filename, *args, **kwargs):
# In backend_bases.py, print_figure changes the dpi of the figure.
# But since we are essentially redrawing the picture, we need the
# original dpi. Pick it up from the renderer.
dpi = kwargs['dpi']
old_dpi = self.figure.dpi
self.figure.dpi = self.renderer.dpi
width, height = self.figure.get_size_inches()
width, height = width*dpi, height*dpi
filename = six.text_type(filename)
self.write_bitmap(filename, width, height, dpi)
self.figure.dpi = old_dpi
def print_bmp(self, filename, *args, **kwargs):
self._print_bitmap(filename, *args, **kwargs)
def print_jpg(self, filename, *args, **kwargs):
self._print_bitmap(filename, *args, **kwargs)
def print_jpeg(self, filename, *args, **kwargs):
self._print_bitmap(filename, *args, **kwargs)
def print_tif(self, filename, *args, **kwargs):
self._print_bitmap(filename, *args, **kwargs)
def print_tiff(self, filename, *args, **kwargs):
self._print_bitmap(filename, *args, **kwargs)
def print_gif(self, filename, *args, **kwargs):
self._print_bitmap(filename, *args, **kwargs)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerMac(*args, **kwargs)
class FigureManagerMac(_macosx.FigureManager, FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
title = "Figure %d" % num
_macosx.FigureManager.__init__(self, canvas, title)
if rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2Mac(canvas)
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
if matplotlib.is_interactive():
self.show()
def close(self):
Gcf.destroy(self.num)
class NavigationToolbar2Mac(_macosx.NavigationToolbar2, NavigationToolbar2):
def __init__(self, canvas):
NavigationToolbar2.__init__(self, canvas)
def _init_toolbar(self):
basedir = os.path.join(rcParams['datapath'], "images")
_macosx.NavigationToolbar2.__init__(self, basedir)
def draw_rubberband(self, event, x0, y0, x1, y1):
self.canvas.set_rubberband(int(x0), int(y0), int(x1), int(y1))
def release(self, event):
self.canvas.remove_rubberband()
def set_cursor(self, cursor):
_macosx.set_cursor(cursor)
def save_figure(self, *args):
filename = _macosx.choose_save_file('Save the figure',
self.canvas.get_default_filename())
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
def prepare_configure_subplots(self):
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasMac(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
return canvas
def set_message(self, message):
_macosx.NavigationToolbar2.set_message(self, message.encode('utf-8'))
def dynamic_update(self):
self.canvas.draw_idle()
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureCanvas = FigureCanvasMac
FigureManager = FigureManagerMac
| mit |
alivecor/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_test.py | 31 | 60315 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testMultipliesGradient(self):
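    # With the learning-rate multiplier pinned to 0.0, the language embedding should
    # stay at its initializer value while the wire embedding is free to move.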
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
model_ops = dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN,
params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
wire_var = dnn_linear_combined._get_embedding_variable(
embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
for _ in range(2):
_, language_value, wire_value = sess.run(
[model_ops.train_op, language_var, wire_var])
initial_value = np.full_like(language_value, 0.1)
self.assertTrue(np.all(np.isclose(language_value, initial_value)))
self.assertFalse(np.all(np.isclose(wire_value, initial_value)))
class ActivationFunctionTest(test.TestCase):
def _getModelForActivation(self, activation_fn):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
'activation_fn': activation_fn,
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
return dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testValidActivation(self):
_ = self._getModelForActivation('relu')
def testRaisesOnBadActivationName(self):
with self.assertRaisesRegexp(ValueError,
'Activation name should be one of'):
self._getModelForActivation('max_pool')
class DNNEstimatorTest(test.TestCase):
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNEstimator)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
dnn_estimator = dnn.DNNEstimator(
head=head_lib.multi_class_head(2, weight_column_name='w'),
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
dnn_estimator.fit(input_fn=_input_fn_train, steps=5)
scores = dnn_estimator.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
class DNNClassifierTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNClassifier)
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn.DNNClassifier(
feature_columns=[embedding_language],
hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
# Set num_ps_replica to be 10 and the min slice size to be extremely small,
    # so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertEqual(1, classifier.params['input_layer_min_slice_size'])
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testEstimatorWithCoreFeatureColumns(self):
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
fc_core.embedding_column(language_column, dimension=1),
fc_core.numeric_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predictions_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[10, 10],
label_keys=label_keys,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
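      # (With identical features, the best constant probability under
      # cross-entropy loss is the label mean, i.e. 1/4 here.)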
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testPredict_AsIterableFalse(self):
"""Tests predict and predict_prob methods with as_iterable=False."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predicted_classes = classifier.predict_classes(
input_fn=_input_fn, as_iterable=False)
self._assertBinaryPredictions(3, predicted_classes)
predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllEqual(predicted_classes, predictions)
probabilities = classifier.predict_proba(
input_fn=_input_fn, as_iterable=False)
self._assertProbabilities(3, n_classes, probabilities)
def testPredict_AsIterable(self):
"""Tests predict and predict_prob methods with as_iterable=True."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertListEqual(predicted_classes, [1, 0, 0])
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predicted_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(
predicted_proba, [[0., 1., 0.], [1., 0., 0.], [1., 0., 0.]], atol=0.3)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
      # For binary classification, the 2nd column of "predictions" holds the
      # predicted probability of the positive class.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn.DNNClassifier(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
    # Test the case where the prediction_key is neither "classes" nor
    # "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
model_dir = tempfile.mkdtemp()
classifier = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(
language, dimension=1)
]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=5)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertIn('dnn/multi_class_head/centered_bias_weight',
classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
class DNNRegressorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNRegressor(
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
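      # (With identical features, squared error is minimized by predicting the
      # label mean, i.e. 1/4 here.)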
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predicted_scores, atol=0.2)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predicted_scores, atol=0.2)
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
    # Tests the case where the 2nd element of the key tuple is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics that use MetricSpec."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[3, 3], config=config)
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertIn('dnn/regression_head/centered_bias_weight',
regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertNotIn('centered_bias_weight', regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = dnn.DNNRegressor(feature_columns=feature_columns, hidden_units=[3, 3])
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
rsivapr/scikit-learn | sklearn/cluster/tests/test_k_means.py | 3 | 22974 | """Testing for K-means"""
import sys
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.fixes import unique
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.cluster._k_means import csr_row_norm_l2
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# non centered, sparse centers to check that the implementation does not rely
# on centered or dense data
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
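# The fixture above gives three well-separated clusters in five dimensions,
# drawn 100 times, in both dense (X) and sparse CSR (X_csr) form; most tests
# below exercise one or both representations.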
def test_square_norms():
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_from_csr = csr_row_norm_l2(X_csr)
assert_array_almost_equal(x_squared_norms,
x_squared_norms_from_csr, 5)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
with warnings.catch_warnings(record=True) as w:
assert_array_equal(km.labels_, km.predict(X))
assert_equal(len(w), 1)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = csr_row_norm_l2(X_csr)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
"""Check that dense and sparse minibatch update give the same results"""
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = csr_row_norm_l2(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of clusters centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_check_fitted():
km = KMeans(n_clusters=n_clusters, random_state=42)
assert_raises(AttributeError, km._check_fitted)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
def _is_mac_os_version(version):
"""Returns True iff Mac OS X and newer than specified version."""
import platform
mac_version, _, _ = platform.mac_ver()
return mac_version.split('.')[:2] == version.split('.')[:2]
def _has_blas_lib(libname):
from numpy.distutils.system_info import get_info
return libname in get_info('blas_opt').get('libraries', [])
def test_k_means_plus_plus_init_2_jobs():
if _is_mac_os_version('10.7'):
raise SkipTest('Multi-process bug in Mac OS X Lion (see issue #636)')
if _has_blas_lib('openblas'):
raise SkipTest('Multi-process bug with OpenBLAS (see issue #636)')
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number of clusters is larger
    # than the init_size
with warnings.catch_warnings(record=True) as warn_queue:
mb_k_means.fit(X)
assert_equal(len(warn_queue), 1)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
    # should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=1,
random_state=42)
mb_k_means.fit(this_X)
centers_before = mb_k_means.cluster_centers_.copy()
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(n_clusters),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
centers_after = mb_k_means.cluster_centers_.copy()
# Check that all the centers have moved
assert_greater(((centers_before - centers_after)**2).sum(axis=1).min(),
.2)
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=1,
init=centers.copy(),
random_state=42)
mb_k_means.fit(this_X)
centers_before = mb_k_means.cluster_centers_.copy()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(n_clusters),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
assert_raises(ValueError,
MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42).fit(X)
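    # By default, MiniBatchKMeans uses an init_size of 3 * batch_size, which is
    # what the assertion below checks.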
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42).fit(X)
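    # init_size is recorded as passed (666), but the effective init_size_ is
    # capped at the number of samples (n_samples == 100 here), which is what
    # the two assertions below check.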
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
"""Check if copy_x=False returns nearly equal X after de-centering."""
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
    # check that my_X was restored (not left centered) by fit
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
"""Check k_means with a bad initialization does not yield a singleton
Starting with bad centers that are quickly ignored should not
result in a repositioning of the centers to the center of mass that
would lead to collapsed centers which in turns make the clustering
dependent of the numerical unstabilities.
"""
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
    # centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also works for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also works for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2, init=init_int).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2, init=init_int).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_n_init():
"""Check that increasing the number of init increases the quality"""
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
with warnings.catch_warnings(record=True) as w:
k_means(X, n_clusters=n_clusters, init=centers)
assert_equal(len(w), 1)
# too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
| bsd-3-clause |
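The tests above rely on two invariants of the KMeans API: predicting the training samples reproduces labels_, and each fitted centroid is assigned to its own cluster index. A minimal standalone sketch of those invariants (the toy data below is made up for illustration):

import numpy as np
from sklearn.cluster import KMeans

X_demo = np.array([[0.0, 0.0], [0.1, 0.0], [10.0, 10.0], [10.1, 9.9]])
km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(X_demo)

# predicting the training set reproduces the stored labels
assert np.array_equal(km.predict(X_demo), km.labels_)
# each centroid is closest to itself, so it gets its own cluster index
assert np.array_equal(km.predict(km.cluster_centers_), np.arange(2))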
ghdk/networkx | examples/graph/knuth_miles.py | 50 | 2994 | #!/usr/bin/env python
"""
An example using networkx.Graph().
miles_graph() returns an undirected graph over the 128 US cities from
the datafile miles_dat.txt. The cities each have location and population
data. The edges are labeled with the distance between the two cities.
This example is described in Section 1.1 in Knuth's book [1,2].
References.
-----------
[1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
[2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
def miles_graph():
""" Return the cites example graph in miles_dat.txt
from the Stanford GraphBase.
"""
# open file miles_dat.txt.gz (or miles_dat.txt)
import gzip
fh = gzip.open('knuth_miles.txt.gz','r')
G=nx.Graph()
G.position={}
G.population={}
cities=[]
for line in fh.readlines():
line = line.decode()
if line.startswith("*"): # skip comments
continue
numfind=re.compile("^\d+")
if numfind.match(line): # this line is distances
dist=line.split()
for d in dist:
G.add_edge(city,cities[i],weight=int(d))
i=i+1
else: # this line is a city, position, population
i=1
(city,coordpop)=line.split("[")
cities.insert(0,city)
(coord,pop)=coordpop.split("]")
(y,x)=coord.split(",")
G.add_node(city)
# assign position - flip x axis for matplotlib, shift origin
G.position[city]=(-int(x)+7500,int(y)-3000)
G.population[city]=float(pop)/1000.0
return G
if __name__ == '__main__':
import networkx as nx
import re
import sys
G=miles_graph()
print("Loaded miles_dat.txt containing 128 cities.")
print("digraph has %d nodes with %d edges"\
%(nx.number_of_nodes(G),nx.number_of_edges(G)))
# make a new graph of cities, with an edge if less than 300 miles between them
H=nx.Graph()
for v in G:
H.add_node(v)
for (u,v,d) in G.edges(data=True):
if d['weight'] < 300:
H.add_edge(u,v)
# draw with matplotlib/pylab
try:
import matplotlib.pyplot as plt
plt.figure(figsize=(8,8))
# with nodes colored by degree and sized by population
node_color=[float(H.degree(v)) for v in H]
nx.draw(H,G.position,
node_size=[G.population[v] for v in H],
node_color=node_color,
with_labels=False)
# scale the axes equally
plt.xlim(-5000,500)
plt.ylim(-2000,3500)
plt.savefig("knuth_miles.png")
except:
pass
| bsd-3-clause |
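The core of the example above is the distance-threshold filter that builds H from G. A self-contained sketch of just that step, on a tiny hand-made graph (the node names and distances are made up):

import networkx as nx

G = nx.Graph()
G.add_edge("A", "B", weight=120)
G.add_edge("B", "C", weight=450)
G.add_edge("A", "C", weight=290)

H = nx.Graph()
H.add_nodes_from(G)
for u, v, d in G.edges(data=True):
    if d["weight"] < 300:
        H.add_edge(u, v)

print(sorted(H.edges()))  # expected: [('A', 'B'), ('A', 'C')]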
gkunter/coquery | coquery/installer/coq_install_coha.py | 1 | 8120 | # -*- coding: utf-8 -*-
"""
coq_install_coha.py is part of Coquery.
Copyright (c) 2016, 2017 Gero Kunter ([email protected])
Coquery is released under the terms of the GNU General Public License (v3).
For details, see the file LICENSE that you should have received along
with Coquery. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import unicode_literals
import pandas as pd
import logging
import os
from coquery.corpusbuilder import BaseCorpusBuilder
from coquery.tables import Column, Identifier, Link
from coquery.defines import SQL_MYSQL
from coquery import options
class BuilderClass(BaseCorpusBuilder):
file_filter = "????.txt"
file_table = "Files"
file_id = "FileId"
file_name = "Filename"
file_path = "Path"
file_columns = [
Identifier(file_id, "SMALLINT(3) UNSIGNED NOT NULL"),
Column(file_name, "CHAR(8) NOT NULL"),
Column(file_path, "TINYTEXT NOT NULL")]
word_table = "Lexicon"
word_id = "WordId"
word_label = "Word"
word_labelcs = "WordCS"
word_lemma = "Lemma"
word_pos = "POS"
word_columns = [
Identifier(word_id, "MEDIUMINT(7) UNSIGNED NOT NULL"),
Column(word_label, "VARCHAR(26) NOT NULL"),
Column(word_labelcs, "VARCHAR(48) NOT NULL"),
Column(word_lemma, "VARCHAR(24) NOT NULL"),
Column(word_pos, "VARCHAR(24) NOT NULL")]
source_table = "Sources"
source_id = "SourceId"
source_label = "Title"
source_author = "Author"
source_year = "Year"
source_genre = "Genre"
source_words = "Words"
source_publisher = "Publisher"
source_columns = [
Identifier(source_id, "MEDIUMINT(7) UNSIGNED NOT NULL"),
Column(source_words, "MEDIUMINT(6) UNSIGNED NOT NULL"),
Column(source_genre, "ENUM('FIC','MAG','NEWS','NF') NOT NULL"),
Column(source_year, "SMALLINT(4) NOT NULL"),
Column(source_label, "VARCHAR(150) NOT NULL"),
Column(source_author, "VARCHAR(100) NOT NULL"),
Column(source_publisher, "VARCHAR(114) NOT NULL")]
corpus_table = "Corpus"
corpus_id = "ID"
corpus_word_id = "WordId"
corpus_source_id = "SourceId"
corpus_columns = [
Identifier(corpus_id, "INT(9) UNSIGNED NOT NULL"),
Link(corpus_word_id, word_table),
Link(corpus_source_id, source_table)]
auto_create = ["file", "word", "source", "corpus"]
expected_files = ["sources_coha.xlsx", "lexicon.txt",
"1810.txt", "1820.txt", "1830.txt", "1840.txt",
"1850.txt", "1860.txt", "1870.txt", "1880.txt",
"1890.txt", "1900.txt", "1910.txt", "1920.txt",
"1930.txt", "1940.txt", "1950.txt", "1960.txt",
"1970.txt", "1980.txt", "1990.txt", "2000.txt"]
def __init__(self, gui=False, *args):
# all corpus builders have to call the inherited __init__ function:
super(BuilderClass, self).__init__(gui, *args)
self.add_time_feature(self.source_year)
@staticmethod
def get_name():
return "COHA"
@staticmethod
def get_db_name():
return "coq_coha"
@staticmethod
def get_language():
return "English"
@staticmethod
def get_language_code():
return "en-US"
@staticmethod
def get_title():
return "Corpus of Historical American English"
@staticmethod
def get_description():
return [
"The Corpus of Historical American English (COHA) is the largest "
"structured corpus of historical English. The corpus was created "
"by Mark Davies of Brigham Young University, with funding from "
"the US National Endowment for the Humanities.",
"COHA allows you search more than 400 million words of text of "
"American English from 1810 to 2009."]
@staticmethod
def get_references():
return ["Davies, Mark. (2010-) <i>The Corpus of Historical American "
"English: 400 million words, 1810-2009</i>. Available online "
"at http://corpus.byu.edu/coha/."]
@staticmethod
def get_url():
return "http://corpus.byu.edu/coha/"
@staticmethod
def get_license():
return "COHA is available under the terms of a commercial license."
@staticmethod
def get_installation_note():
db_type = options.cfg.current_connection.db_type()
if db_type == SQL_MYSQL:
return """
<p><b>MySQL installation note</b><p>
<p>The COHA installer uses a special feature of MySQL servers
which allows to load large chunks of data into the database in a
single step.</p>
<p>This feature notably speeds up the installation of the COHA
corpus. However, it may be disabled on your MySQL servers. In that
case, the installation will fail with an error message similar to
the following: </p>
<p><code>The used command is not allowed with this MySQL version
</code></p>
<p>Should the installation fail, please ask your MySQL server
administrator to enable loading of local in-files by setting the
option <code>local-infile</code> in the MySQL configuration file.
</p>
"""
else:
return None
def build_load_files(self):
files = sorted(self.get_file_list(self.arguments.path,
self.file_filter))
self._widget.progressSet.emit(len(files), "")
for count, file_name in enumerate(files):
if self.interrupted:
return
base_name = os.path.basename(file_name)
s = "Reading '{}' (file %v out of %m)".format(base_name)
self._widget.labelSet.emit(s)
logging.info("Reading {}".format(file_name))
# set up default arguments for the load_file() method:
kwargs = {"sep": "\t", "quoting": 3, "encoding": "latin-1"}
# handle the different files:
if base_name == "sources_coha.xlsx":
# load the sources from an excel file
names = [self.source_id, self.source_words, self.source_genre,
self.source_year, self.source_label,
self.source_author, self.source_publisher]
df = pd.read_excel(file_name, skiprows=0, names=names,
engine=None)
for col in (self.source_label, self.source_author,
self.source_publisher):
df[col] = df[col].fillna("")
self.DB.load_dataframe(
df, self.source_table, index_label=None)
elif base_name == "lexicon.txt":
# load the lexicon file
names = (self.word_id, self.word_labelcs, self.word_label,
self.word_lemma, self.word_pos)
self.DB.load_file(
file_name=file_name,
table=self.word_table,
index=None,
skiprows=2,
header=None,
names=names,
error_bad_lines=False,
na_filter=False,
drop_duplicate=self.word_id,
**kwargs)
else:
# load a corpus file
names = (self.corpus_source_id, self.corpus_id,
self.corpus_word_id)
lines = self.DB.load_file(
file_name=file_name,
table=self.corpus_table,
index=None,
header=None,
names=names,
**kwargs)
self._corpus_id += lines
self.store_filename(base_name)
self._widget.progressUpdate.emit(count + 1)
if __name__ == "__main__":
BuilderClass().build()
| gpl-3.0 |
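One step of build_load_files() above that is easy to overlook is the handling of missing text fields in the sources spreadsheet: the string columns are filled with empty strings before being loaded into the database. A small standalone sketch of that cleanup (column names and values are made up):

import numpy as np
import pandas as pd

df = pd.DataFrame({"Title": ["A tale", np.nan],
                   "Author": [np.nan, "Doe"],
                   "Publisher": ["Acme", np.nan]})
for col in ("Title", "Author", "Publisher"):
    df[col] = df[col].fillna("")
print(df)  # NaN entries replaced by empty strings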
oduwa/Pic-Numero | PicNumero/tqdm/_tqdm_pandas.py | 2 | 2118 | # future division is important to divide integers and get as
# a result precise floating numbers (instead of truncated int)
from __future__ import absolute_import
from __future__ import division
__author__ = "github.com/casperdcl"
__all__ = ['tqdm_pandas']
def tqdm_pandas(t):
"""
Registers the given `tqdm` instance with
`pandas.core.groupby.DataFrameGroupBy.progress_apply`.
It will even close() the `tqdm` instance upon completion.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm, tqdm_pandas
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm_pandas(tqdm()) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : DataFrame[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
*args and **kwargs are transmitted to DataFrameGroupBy.apply()
"""
t.total = getattr(df, 'ngroups', None)
if t.total is None: # not grouped
t.total = len(df) if isinstance(df, Series) \
else df.size // len(df)
else:
t.total += 1 # pandas calls update once too many
def wrapper(*args, **kwargs):
t.update()
return func(*args, **kwargs)
result = df.apply(wrapper, *args, **kwargs)
t.close()
return result
# Enable custom tqdm progress in pandas!
DataFrame.progress_apply = inner
DataFrameGroupBy.progress_apply = inner
Series.progress_apply = inner
SeriesGroupBy.progress_apply = inner
| mit |
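The module above also registers progress_apply on plain Series objects. A usage sketch following the registration style documented in tqdm_pandas() (the data is made up, and the sketch assumes the tqdm package is installed; newer tqdm releases expose tqdm.pandas() as the preferred registration call):

import numpy as np
import pandas as pd
from tqdm import tqdm, tqdm_pandas

s = pd.Series(np.random.randint(0, 100, 10000))
tqdm_pandas(tqdm())
squares = s.progress_apply(lambda x: x ** 2)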
justincassidy/scikit-learn | sklearn/tree/tree.py | 113 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity;
# using [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
| bsd-3-clause |
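A minimal usage sketch for the DecisionTreeClassifier defined above (the toy data is made up and the exact probabilities depend on the fitted tree):

import numpy as np
from sklearn.tree import DecisionTreeClassifier

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 0, 1, 1])
clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)

print(clf.predict([[1, 0]]))           # predicted class label
print(clf.predict_proba([[1, 0]]))     # per-class probabilities
print(clf.feature_importances_)        # normalized importance per feature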
performancecopilot/pcp2pdf | src/pcp2pdf/stats.py | 1 | 37113 | # pcp2pdf.stats - pcp(1) report graphing utility
# Copyright (C) 2014 Michele Baldessari
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import bisect
import datetime
import hashlib
import itertools
import multiprocessing
import os
import re
import resource
import shutil
import sys
import tempfile
import time
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus import Image
from reportlab.platypus import PageBreak
from reportlab.platypus import Spacer
from reportlab.platypus import Table
import reportlab.lib.colors
from reportlab.lib.pagesizes import A4
from reportlab.lib.pagesizes import landscape
from reportlab.lib.units import inch
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.colors as colors
import matplotlib.cm as cm
from matplotlib.patches import Rectangle
import cpmapi as c_api
from pcp2pdf.style import PcpDocTemplate
from pcp2pdf.archive import PcpArchive
from pcp2pdf.archive import PcpHelp
# When showing a rectangle gap where the interval is greater than the average
# frequency we first multiply by FREQUENCY_ERROR in order to avoid spurious rectangles
FREQUENCY_ERROR = 1.1
# Threshold above which the legend is placed on the bottom
# of the page
LEGEND_THRESHOLD = 50
progress_counter = multiprocessing.Value('i', 0)
progress_lock = multiprocessing.Lock()
progress_total = 0
def ellipsize(text, limit=20):
'''Truncates a string in a nice-formatted way.'''
if len(text) < limit:
return text
limit = limit - 2 # '..'
a = int(limit / 2)
b = int(limit / 2 + (limit % 2))
ret = text[:a] + '..' + text[len(text) - b:]
return ret
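# Worked example: ellipsize("a_very_long_metric_name", 10) -> 'a_ve..name'
# (keeps roughly limit/2 characters from each end, joined by '..')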
def date_string(dt):
'''Prints a datetime string in format '2014-10-21 23:24:10'.'''
return dt.strftime('%Y-%m-%d %H:%M:%S')
def parse_progress_callback(ts, start, finish):
percentage = round(((ts - start) / (finish - start)) * 100.0, 1)
sys.stdout.write('\rParsing archive: [%s %s%%]' % ('#' * (int(percentage/10)), percentage))
sys.stdout.flush()
def graph_progress_callback(pcpobj):
percentage = round((progress_counter.value / progress_total) * 100.0, 1)
sys.stdout.write('\rCreating graphs: [%s %s%%]' % ('#' * (int(percentage/10)), percentage))
sys.stdout.flush()
def split_chunks(list_to_split, chunksize):
"""Split the list l in chunks of at most n in size."""
ret = [list_to_split[i:i + chunksize]
for i in range(0, len(list_to_split), chunksize)]
return ret
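# Worked example: split_chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]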
def graph_wrapper(zip_obj):
"""Wrapper due to pool.map() single argument limit.
zip_obj = zip(itertools.repeat(self), self.all_graphs)
where self is a PcpStats object. Each CPU will get
a slice of the self.all_graphs list
"""
(pcpstats_obj, data) = list(zip_obj)
(label, fname, metrics, text, indom_regexes, histogram) = data
if histogram:
ret = pcpstats_obj.create_histogram(fname, label, metrics, indom_regexes)
else:
ret = pcpstats_obj.create_graph(fname, label, metrics, indom_regexes)
with progress_lock:
progress_counter.value += 1
graph_progress_callback(pcpstats_obj)
return ((label, fname, metrics, text, indom_regexes, histogram), ret)
def print_mem_usage(data):
usage = resource.getrusage(resource.RUSAGE_SELF)
print("Graphing: {0} usertime={1} systime={2} mem={3} MB"
.format(data, usage[0], usage[1], (usage[2] / 1024.0)))
def match_res(patterns, string, flags=0):
if type(string) != str:
string = str(string)
for pattern in patterns:
ret = re.match(pattern, string, flags)
if ret is not None:
return ret
return None
class PcpStats(object):
story = []
def __init__(self, args, opts):
self.args = args
self.opts = opts
self.configparser = opts.configparser
self.doc = PcpDocTemplate(opts.output_file, self.configparser, pagesize=landscape(A4))
self.pcphelp = PcpHelp()
self.pcparchive = PcpArchive(args, opts)
if self.opts.dpi is not None and self.opts.dpi > 0:
self.DPI = self.opts.dpi
else:
self.DPI = self.configparser.getint('main', 'dpi')
self.logo = self.configparser.get('main', 'logo')
# Allow running from the current dir for unit-testing purposes
if not os.path.isfile(self.logo):
self.logo = os.path.join(os.getcwd(), "src", "pcplogo.png")
# Using /var/tmp as /tmp is ram-mounted these days
self.tempdir = tempfile.mkdtemp(prefix='pcpstats', dir='/var/tmp')
# This will contain all the metrics found in the archive file
self.all_data = {}
# Verify which set of metrics are to be used
self.metrics = []
if not opts.include and not opts.exclude:
self.metrics = sorted(self.pcparchive.get_metrics())
elif opts.include and not opts.exclude: # Only specified filters
metrics = sorted(self.pcparchive.get_metrics())
for i in opts.include:
try:
matched = filter(lambda x: re.match(i, x), metrics)
except Exception:
print("Failed to parse: {0}".format(i))
sys.exit(-1)
self.metrics.extend(matched)
elif not opts.include and opts.exclude: # Exclude specified filter
metrics = sorted(self.pcparchive.get_metrics())
matched = []
for i in opts.exclude:
try:
matched.extend(filter(lambda x: re.match(i, x), metrics))
except Exception:
print("Failed to parse: {0}".format(i))
sys.exit(-1)
self.metrics = sorted(list(set(metrics) - set(matched)))
else:
all_metrics = sorted(self.pcparchive.get_metrics())
matched = []
for i in opts.exclude:
try:
matched.extend(filter(lambda x: re.match(i, x),
all_metrics))
except Exception:
print("Failed to parse: {0}".format(i))
sys.exit(-1)
delta_metrics = sorted(list(set(all_metrics) - set(matched)))
metrics = sorted(self.pcparchive.get_metrics())
for i in opts.include:
try:
matched = filter(lambda x: re.match(i, x), metrics)
except Exception:
print("Failed to parse: {0}".format(i))
sys.exit(-1)
delta_metrics.extend(matched)
self.metrics = delta_metrics
self.custom_graphs = []
# Verify if there are any custom graphs. They can be defined like
# the following :
# "foo:network.interface.out.bytes:eth0,network.tcp..*:.*"
for graph in opts.custom_graphs:
try:
x = graph.find(':')
label = graph[0:x]
line = graph[x + 1:]
elements = line.split(',')
except Exception:
print("Failed to parse label: {0}".format(graph))
sys.exit(-1)
if label in self.metrics:
print("Cannot use label {0}. It is an existing metric".format(label))
sys.exit(-1)
all_metrics = sorted(self.pcparchive.get_metrics())
indom_regexes = {}
metrics = []
for element in elements:
try:
(metric_str, indom_str) = element.split(':')
except Exception:
print("Failed to parse: {0}".format(element))
sys.exit(-1)
try:
tmp_metrics = filter(lambda x: re.match(metric_str, x),
all_metrics)
metrics.extend(tmp_metrics)
except Exception:
print("Failed to parse: {0}".format(metric_str))
sys.exit(-1)
for metric in tmp_metrics:
if metric in indom_regexes:
indom_regexes[metric].append(indom_str)
else:
indom_regexes[metric] = [indom_str]
# Try to compile the indom_res to make sure they are valid
errors = []
for pattern in indom_regexes:
try:
re.compile(pattern)
except Exception:
errors.append(pattern)
pass
if errors:
print("Invalid regular expressions: {0}".format(
" ".join(errors)))
sys.exit(-1)
# We expanded all the metrics here. We cannot do the same for
# indoms as those are not yet available. We just pass the regexes
# and do it at custom graph creation time
self.custom_graphs.append(("Custom.%s" % label, metrics, indom_regexes))
try: # Not all matplotlib versions have this key
matplotlib.rcParams['figure.max_open_warning'] = 100
except KeyError:
pass
def _graph_filename(self, metrics, extension='.png', histogram=False):
'''Creates a unique, deterministic file name given a list of metrics.'''
# On python 2.6 we fall back to .jpg even though graph quality is affected,
# because the underlying imaging lib bails out on a few graphs from
# time to time
pyver = sys.version_info
if pyver[0] == 2 and pyver[1] <= 6:
extension = '.jpg'
if isinstance(metrics, list):
temp = ''
for i in metrics:
temp += i
else:
temp = "_".join(metrics)
if histogram:
temp = 'h' + temp
fname = os.path.join(self.tempdir, temp + extension)
return fname
def _do_heading(self, text, sty):
if isinstance(text, list):
text = "_".join(text)
# create bookmarkname
bn = hashlib.sha1(text.encode('utf-8') + sty.name.encode('utf-8')).hexdigest()
# modify paragraph text to include an anchor point with name bn
# store the bookmark name on the flowable so afterFlowable can see this
h = Paragraph(text + '<a name="%s"/>' % bn, sty)
h._bookmarkName = bn
self.story.append(h)
def rate_convert(self, timestamps, values):
'''Do a rate conversion
Given a list of timestamps and a list of values it will return the
following:
[[t1,..,tN], [(v1-v0)/(t1-t0),(v2-v1)/(t2-t1),..,(vN-vN-1)/(tN -tN-1)]
'''
if len(timestamps) != len(values):
raise Exception('Len of timestamps must be equal to len of values')
new_timestamps = []
new_values = []
for t in range(1, len(timestamps)):
delta = timestamps[t] - timestamps[t - 1]
new_timestamps.append(delta)
for v in range(1, len(values)):
seconds = new_timestamps[v - 1].total_seconds()
try:
delta = (values[v] - values[v - 1]) / seconds
except ZeroDivisionError:
# If we have a zero time interval but the values difference is also
# zero, return 0 anyway
if values[v] - values[v - 1] == 0:
delta = 0
pass
else:
# if the delta between the values is not zero try to use
# the previous calculated delta
if v > 1:
delta = new_values[v - 2]
else: # In all other cases just set the delta to 0
delta = 0
pass
new_values.append(delta)
# Add previous datetime to the time delta
for t in range(len(new_timestamps)):
ts = new_timestamps[t]
new_timestamps[t] = ts + timestamps[t]
return (new_timestamps, new_values)
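# Worked example (made-up values): with timestamps [t0, t0+10s, t0+20s]
# and counter values [100, 400, 1000], rate_convert() returns
# timestamps [t0+10s, t0+20s] and rates [(400-100)/10, (1000-400)/10],
# i.e. [30.0, 60.0].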
def find_max(self, timestamp, metrics):
'''Find maximum value given timestamp and metrics
Given data as returned by pcparchive.get_values a timestamp and a set
of metrics, find the maximum y value. If the given timestamp does not
exist in the data we do a linear interpolation.
'''
max_value = -sys.maxint
for metric in metrics:
for indom in self.all_data[metric]:
timestamps = self.all_data[metric][indom][0]
y_values = self.all_data[metric][indom][1]
try:
x = timestamps.index(timestamp)
y = y_values[timestamp]
except ValueError:
time_key_right = bisect.bisect_right(timestamps, timestamp)
# If the custom label's timestamp falls outside the
# data we have we skip this
if time_key_right >= len(timestamps):
continue
time_key_left = time_key_right - 1
x1 = mdates.date2num(timestamps[time_key_left])
x2 = mdates.date2num(timestamps[time_key_right])
y1 = y_values[time_key_left]
y2 = y_values[time_key_right]
if x1 == x2 or y1 == y2: # No need to do any interpolation
y = y1
else:
m = (y2 - y1) / (x2 - x1)
x = mdates.date2num(timestamp) - x1
y = m * x + y1
if y > max_value:
max_value = y
return max_value
def get_frequency(self, data):
# First we calculate the observed frequency (in seconds) of the
# observed measurements
total = 0.0
counter = 0
for metric in data:
for indom in data[metric]:
timestamps = data[metric][indom][0]
last = None
for timestamp in timestamps:
if not last:
last = timestamp
continue
delta = (timestamp - last).total_seconds()
total += delta
counter += 1
last = timestamp
frequency = total / counter
return frequency
def find_data_gaps(self, data):
'''Find data gaps given a dataset
Returns a dictionary with tuples containing the start and end of the
large intervals as tuples. The value of the dictionary is a list of
tuples where this interval has been observed (metric, indom).
Returns: {(gap1start, gap1end): [(metric, indom), (m2, indom2), ...],
{gap2start, gap2end): [(metric, indom), (m2, indom2), ...]}
'''
frequency = self.get_frequency(data)
ret = {}
for metric in data:
for indom in data[metric]:
timestamps = data[metric][indom][0]
last = None
for timestamp in timestamps:
if not last:
last = timestamp
continue
delta = (timestamp - last).total_seconds()
if delta > frequency * FREQUENCY_ERROR:
key = (last, timestamp)
if key not in ret:
ret[key] = [(metric, indom)]
else:
ret[key].append((metric, indom))
last = timestamp
return ret
def parse(self):
'''Parse the archive and store all the metrics in self.all_data
It returns a dictionary containing the metrics which have been
rate converted
'''
start_time = time.time()
(all_data, self.skipped_graphs) = self.pcparchive.get_values(progress=parse_progress_callback)
tdelta = time.time() - start_time
sys.stdout.write('\rParsing archive: [########## 100.0%%] - %.2fs' % tdelta)
sys.stdout.flush()
print()
rate_converted = {}
# Prune all the sets of values where all values are zero as it makes
# no sense to show those
for metric in all_data:
rate_converted[metric] = False
tmp = {}
# FIXME: Once python 2.6 dep is dropped we can use the following
# tmp = {key: value for key, value in all_data[metric].items()
# if not all([ v == 0 for v in value[1]])}
data = all_data[metric].items()
for key, value in data:
if not all([v == 0 for v in value[1]]):
tmp[key] = value
if tmp:
self.all_data[metric] = tmp
if self.opts.raw: # User explicitly asked not to rate convert any metrics
return rate_converted
# Rate convert all the PM_SEM_COUNTER metrics
for metric in self.all_data:
(mtype, msem, munits, dtype, desc_units, desc_type) = self.pcparchive.get_metric_info(metric)
if msem != c_api.PM_SEM_COUNTER:
continue
for indom in self.all_data[metric]:
data = self.all_data[metric][indom]
(ts, val) = self.rate_convert(data[0], data[1])
self.all_data[metric][indom] = [ts, val]
if not rate_converted[metric]:
rate_converted[metric] = {}
rate_converted[metric][indom] = True
return rate_converted
def get_category(self, label, metrics):
'''Return the category given one or a list of metric strings.'''
if isinstance(metrics, str):
if label.startswith('Custom'):
return 'Custom'
return metrics.split('.')[0]
elif isinstance(metrics, list):
if label.startswith('Custom'):
return 'Custom'
category = None
for metric in metrics:
prefix = metric.split('.')[0]
if category is None and prefix != category:
category = prefix
elif category is not None and prefix != category:
raise Exception('Multiple categories in %s' % metrics)
return category.title()
else:
raise Exception('Cannot find category for %s' % metrics)
def is_string_metric(self, metric):
'''Given a metric returns True if values' types are strings.'''
data = self.all_data[metric]
isstring = False
for indom in data:
values = data[indom][1]
if all([isinstance(v, bytes) for v in values]):
isstring = True
break
return isstring
def get_colormap(self, metrics, indom_regexes):
'''Return the colormap used to plot the different graphs'''
# First we calculate the maximum number of colors needed
max_values_len = 0
for metric in metrics:
values = self.all_data[metric]
count = 0
for indom in values:
if indom_regexes is not None and metric in indom_regexes:
if match_res(indom_regexes[metric], indom) is None:
continue
count += 1
if count > max_values_len:
max_values_len = count
# We need at most number of max(indoms) * metrics colors
vmax_color = max_values_len * len(metrics)
color_norm = colors.Normalize(vmin=0, vmax=vmax_color)
scalar_map = cm.ScalarMappable(norm=color_norm,
cmap=plt.get_cmap('Set1'))
return scalar_map
def create_histogram(self, fname, title, metrics, indom_regexes):
'''Creates a histogram image
Take a filename, a title, a list of metrics and a set of indom regexes to
create an image of the histogram
'''
# reportlab has a 72 dpi by default
fig = plt.figure(figsize=(self.doc.graph_size[0],
self.doc.graph_size[1]))
axes = fig.add_subplot(111)
# Set Axis metadata
axes.set_xlabel('Values')
axes.set_title('{0}'.format(title, fontsize=self.doc.fonts['axes'].fontSize))
axes.set_ylabel('%s frequency' % title)
y_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes.yaxis.set_major_formatter(y_formatter)
axes.yaxis.get_major_formatter().set_scientific(False)
axes.grid(True)
found = False
indoms = 0
counter = 0
scalar_map = self.get_colormap(metrics, indom_regexes)
# Then we walk the metrics and plot
for metric in metrics:
values = self.all_data[metric]
for indom in sorted(values):
# If indom_regexes is not None we use the indom only if one of the
# regex strings matches
if indom_regexes is not None and metric in indom_regexes:
if match_res(indom_regexes[metric], indom) is None:
continue
(timestamps, dataset) = values[indom]
# Currently if there is only one (timestamp,value) like with filesys.blocksize
# we just do not graph the thing
if len(timestamps) <= 1:
continue
if len(metrics) > 1:
if indom == 0:
lbl = metric
else:
lbl = "%s %s" % (metric, indom)
else:
if indom == 0:
lbl = title
else:
lbl = indom
lbl = ellipsize(lbl, 30)
found = True
try:
axes.hist(dataset, cumulative=False,
label=lbl, color=scalar_map.to_rgba(counter))
except Exception:
import traceback
print("Metric: {0}".format(metric))
print(traceback.format_exc())
sys.exit(-1)
indoms += 1
counter += 1
if not found:
return False
# Add legend only when there is more than one instance
lgd = False
if indoms > 1:
fontproperties = matplotlib.font_manager.FontProperties(size='xx-small')
if indoms > LEGEND_THRESHOLD:
# Draw legend on the bottom only when instances are more than
# LEGEND_THRESHOLD
lgd = axes.legend(loc=9, ncol=int(indoms ** 0.6),
bbox_to_anchor=(0.5, -0.29), shadow=True,
prop=fontproperties)
else:
                    # Draw legend on the right when instances are fewer than
                    # or equal to LEGEND_THRESHOLD
lgd = axes.legend(loc=1, ncol=int(indoms ** 0.5), shadow=True,
prop=fontproperties)
if lgd:
plt.savefig(fname, bbox_extra_artists=(lgd,), bbox_inches='tight',
dpi=self.DPI)
else:
plt.savefig(fname, bbox_inches='tight', dpi=self.DPI)
plt.cla()
plt.clf()
plt.close('all')
return True
def create_graph(self, fname, title, metrics, indom_regexes):
'''Creates a graph image
        Takes a filename, a title, a list of metrics and an indom_regexes
        mapping to create an image of the graph
'''
# reportlab has a 72 dpi by default
fig = plt.figure(figsize=(self.doc.graph_size[0],
self.doc.graph_size[1]))
axes = fig.add_subplot(111)
# Set X Axis metadata
axes.set_xlabel('Time')
        axes.set_title('{0}'.format(title), fontsize=self.doc.fonts['axes'].fontSize)
axes.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d %H:%M'))
axes.xaxis.set_minor_locator(mdates.MinuteLocator(interval=20))
fig.autofmt_xdate()
# Set Y Axis metadata
axes.set_ylabel(title)
y_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes.yaxis.set_major_formatter(y_formatter)
axes.yaxis.get_major_formatter().set_scientific(False)
axes.grid(True)
found = False
indoms = 0
counter = 0
scalar_map = self.get_colormap(metrics, indom_regexes)
# Then we walk the metrics and plot
for metric in metrics:
values = self.all_data[metric]
for indom in sorted(values):
# If the indom_regexes is not None we use the indom only if the re string
# matches
if indom_regexes is not None and metric in indom_regexes:
if match_res(indom_regexes[metric], indom) is None:
continue
(timestamps, dataset) = values[indom]
# Currently if there is only one (timestamp,value) like with filesys.blocksize
# we just do not graph the thing
if len(timestamps) <= 1:
continue
if len(metrics) > 1:
if indom == 0:
lbl = metric
else:
lbl = "%s %s" % (metric, indom)
else:
if indom == 0:
lbl = title
else:
lbl = indom
lbl = ellipsize(lbl, 30)
found = True
try:
axes.plot(timestamps, dataset, 'o:', label=lbl,
color=scalar_map.to_rgba(counter))
except Exception:
import traceback
print("Metric: {0}".format(metric))
print(traceback.format_exc())
sys.exit(-1)
# Have the Y axis always start from 0
axes.set_ylim(ymin=0)
indoms += 1
counter += 1
if not found:
return False
# Show any data collection gaps in the graph
gaps = self.find_data_gaps(self.all_data).keys()
if gaps:
for i in gaps:
(g1, g2) = i
x1 = mdates.date2num(g1)
x2 = mdates.date2num(g2)
(ymin, ymax) = plt.ylim()
axes.add_patch(Rectangle((x1, ymin), x2 - x1, ymax - ymin,
facecolor="lightgrey"))
# Draw self.labels if non empty
if self.opts.labels:
for label in self.opts.labels:
max_value = self.find_max(self.opts.labels[label], metrics)
# should we not find a max_value at all (due to empty timestamps)
if max_value == -sys.maxint:
max_value = 0
axes.annotate(label, xy=(mdates.date2num(self.opts.labels[label]), max_value),
xycoords='data', xytext=(30, 30), textcoords='offset points',
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))
# Add legend only when there is more than one instance
lgd = False
if indoms > 1:
fontproperties = matplotlib.font_manager.FontProperties(size='xx-small')
if indoms > LEGEND_THRESHOLD:
# Draw legend on the bottom only when instances are more than
# LEGEND_THRESHOLD
lgd = axes.legend(loc=9, ncol=int(indoms ** 0.6),
bbox_to_anchor=(0.5, -0.29), shadow=True,
prop=fontproperties)
else:
                    # Draw legend on the right when instances are fewer than
                    # or equal to LEGEND_THRESHOLD
lgd = axes.legend(loc=1, ncol=int(indoms ** 0.5), shadow=True,
prop=fontproperties)
if lgd:
plt.savefig(fname, bbox_extra_artists=(lgd,), bbox_inches='tight',
dpi=self.DPI)
else:
plt.savefig(fname, bbox_inches='tight', dpi=self.DPI)
plt.cla()
plt.clf()
plt.close('all')
return True
def get_all_graphs(self):
'''Returns all the graphs that need to be plotted
Prepare the full list of graphs that will be drawn
Start with any custom graphs if they exist and
proceed with the remaining ones. Split the metrics
that have string values into a separate array
all_graphs = [(label, fname, (m0, m1, .., mN), text), ...].
'''
all_graphs = []
string_metrics = []
indom_res = []
for graph in self.custom_graphs:
(label, metrics, indom_res) = graph
fname = self._graph_filename(label)
text = None
custom_metrics = []
            for metric in metrics: # verify custom graph's metrics existence
if metric in self.all_data:
custom_metrics.append(metric)
            if not custom_metrics:
                continue
if isinstance(metrics, str) and metrics in self.pcphelp.help_text:
text = '<strong>%s</strong>: %s' % (metrics, self.pcphelp.help_text[metrics])
all_graphs.append((label, fname, custom_metrics, text, indom_res, False))
if self.opts.histogram:
fname = self._graph_filename(label, histogram=True)
all_graphs.append(('%s histogram' % label, fname, custom_metrics, text, indom_res, True))
for metric in sorted(self.all_data):
# Make sure that we plot only the metrics that the
# user has specified
if metric not in self.metrics:
continue
if self.is_string_metric(metric):
string_metrics.append(metric)
continue
fname = self._graph_filename([metric])
units_str = self.pcparchive.get_metric_info(metric)[4]
type_str = self.pcparchive.get_metric_info(metric)[5]
if isinstance(metric, str) and metric in self.pcphelp.help_text:
help_text = self.pcphelp.help_text[metric]
else:
help_text = '...'
text = '<strong>%s</strong>: %s (%s - %s)' % (metric, help_text, units_str, type_str)
if self.rate_converted[metric]:
text = text + ' - <em>%s</em>' % 'rate converted'
all_graphs.append((metric, fname, [metric], text, None, False))
if self.opts.histogram:
fname = self._graph_filename([metric], histogram=True)
all_graphs.append(('%s histogram' % metric, fname, [metric], text, None, True))
return (all_graphs, string_metrics)
def output(self):
# FIXME: Split this function in smaller pieces. This is unreadable
self.rate_converted = self.parse()
(self.all_graphs, string_metrics) = self.get_all_graphs()
if not self.all_graphs:
print('No usable non-zero graphs found.')
sys.exit(0)
width = self.doc.pagesize[0]
hostname = self.pcparchive.get_hostname()
self._do_heading('Report', self.doc.fonts["heading1_invisible"])
self.story.append(Paragraph('%s' % hostname, self.doc.fonts["front_title"]))
self.story.append(Spacer(1, 1.5 * inch))
self.story.append(Image(self.logo))
self.story.append(Spacer(1, 0.5 * inch))
data = [['PCP Archive', '%s' % (" ".join(self.args))],
['Start', '%s' % date_string(datetime.datetime.fromtimestamp(self.pcparchive.start))],
['End', '%s' % date_string(datetime.datetime.fromtimestamp(self.pcparchive.end))],
['Created', '%s' % date_string(datetime.datetime.now())], ]
rows = 4
if self.pcparchive.interval:
data.append(['Interval', '%s seconds' % self.pcparchive.interval])
rows = 5
style = [('GRID', (0, 0), (-1, -1), 1, reportlab.lib.colors.black),
('ALIGN', (0, 0), (-1, -1), "LEFT"),
('FONTSIZE', (0, 0), (-1, -1), 14),
('FONTNAME', (0, 0), (-1, -1), "Helvetica"),
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
('INNERGRID', (0, 0), (-1, -1), 0.44, reportlab.lib.colors.black), ]
table = Table(data, 2 * [3.5 * inch], rows * [0.4 * inch])
table.setStyle(style)
self.story.append(table)
self.story.append(PageBreak())
start_time = time.time()
done_metrics = []
global progress_total
progress_total = len(self.all_graphs)
# Set this to False to disable multiprocessing
if True:
pool = multiprocessing.Pool(None)
l = zip(itertools.repeat(self), self.all_graphs)
metrics_rets = pool.map(graph_wrapper, l)
(metrics, rets) = zip(*metrics_rets)
done_metrics = [metric for (metric, ret) in metrics_rets if ret]
else: # This is just to debug in non multi-threaded mode
for graph in self.all_graphs:
(label, fname, metrics, text, indom_regexes, histogram) = graph
if histogram:
if self.create_histogram(fname, label, metrics, indom_regexes):
done_metrics.append(graph)
progress_counter.value += 1
else:
if self.create_graph(fname, label, metrics, indom_regexes):
done_metrics.append(graph)
progress_counter.value += 1
graph_progress_callback(self)
tdelta = time.time() - start_time
sys.stdout.write('\rCreating graphs: [########## 100.0%%] - %.2fs' % tdelta)
sys.stdout.flush()
print()
# Build the string metrics table. It only prints
# a value if it changed over time
data = [('Metric', 'Timestamp', 'Value')]
for metric in string_metrics:
last_value = None
for indom in self.all_data[metric]:
timestamps = self.all_data[metric][indom][0]
values = self.all_data[metric][indom][1]
for (ts, v) in zip(timestamps, values):
if last_value != v:
text = ellipsize(v, 100)
ts = date_string(ts)
data.append((metric, '%s' % ts, text))
last_value = v
if len(data) > 1:
self._do_heading('String Metrics', self.doc.fonts["heading1"])
self.story.append(Spacer(1, 0.2 * inch))
table = Table(data, colWidths=(0.17 * width, 0.12 * width, 0.56 * width))
table.setStyle(self.doc.tablestyle)
self.story.append(table)
self.story.append(PageBreak())
# At this point all images are created let's build the pdf
print("Building pdf: ", end='')
sys.stdout.flush()
start_time = time.time()
# Add the graphs to the pdf
last_category = ''
for graph in done_metrics:
(label, fname, metrics, text, indom_res, histogram) = graph
category = self.get_category(label, metrics)
if last_category != category:
self._do_heading(category, self.doc.fonts["heading1"])
last_category = category
self._do_heading(label, self.doc.fonts["heading2_invisible"])
self.story.append(Image(fname, width=self.doc.graph_size[0] * inch,
height=self.doc.graph_size[1] * inch))
if text:
self.story.append(Paragraph(text, self.doc.fonts["normal"]))
self.story.append(PageBreak())
self.doc.multiBuild(self.story)
tdelta = time.time() - start_time
print("{0} - {1:.2f}s".format(self.opts.output_file, tdelta))
shutil.rmtree(self.tempdir)
| gpl-2.0 |
hsuantien/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 243 | 7461 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. On the
opposite, if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
In a second time, we set alpha and compare the performance of different
feature selection methods, using the area under curve (AUC) of the
precision-recall.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and enables to
# see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Stop the user warnings outputs- they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
    # Run the RandomizedLasso: we use a path going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
wbap/Hackathon2015 | Nakamura/Multimodal Deep Learning/imgread.py | 2 | 1134 | #coding:utf-8
from PIL import Image
import numpy as np
import csv
import pandas as pd
#from pylab import *
all_person = []
for p in range(1,13,1): #person
all_digit = []
for d in range(0,10,1): #digit
all_frames = []
for f in range(0,15,1): #frame
filename = str(d) + "-" + str(f) + ".gif"
path = "/Users/kazeto/Desktop/data/Video/"
dilectory = path + str(p) + "/all/" + filename
img = np.array( Image.open(dilectory) )
            # array declarations
all_img = []
img_array = []
for k in range(len(img)):
img_array = np.r_[img_array , img[k]]
all_frames.append(img_array)
img_array = []
all_digit.append(all_frames)
all_frames = []
all_person.append(all_digit)
all_digit = []
print '1:{} 2:{} 3:{} 4:{}'.format(len(all_person),len(all_person[0]),len(all_person[0][0]),len(all_person[0][0][0]))
# print all_img
with open('/Users/kazeto/Desktop/data/img.csv','wb') as f:
writer = csv.writer(f)
writer.writerows(all_person)
| apache-2.0 |
soylentdeen/grillduino | guiHandler.py | 1 | 6204 | import numpy
import Tkinter
import time
import threading
import serial
import random
import Queue
import matplotlib.pyplot as pyplot
import matplotlib
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
import tweetHandler
class GuiPart:
def __init__(self, master, queue, endCommand):
self.queue = queue
out = open('calibration.dat', 'w')
out.close()
self.startTime = time.time()
# Set up the GUI
DoneButton = Tkinter.Button(master, text='Done', command=endCommand)
DoneButton.pack()
RefreshButton = Tkinter.Button(master, text='Refresh',
command=self.refresh)
RefreshButton.pack()
self.TempEntry = Tkinter.Entry(master)
self.TempEntry.bind('<Return>', self.addTemp)
self.TempEntry.pack()
self.R1 = 1000.0
self.R2 = 1000.0
self.V1 = numpy.array([])
self.V2 = numpy.array([])
self.V3 = numpy.array([])
self.R3 = numpy.array([])
self.T3 = numpy.array([])
self.Vtime = numpy.array([])
self.meattemp = [numpy.array([]), numpy.array([])]
figure = pyplot.figure(figsize=(5, 4), dpi=100)
axis = figure.add_axes([0.1, 0.1, 0.8, 0.8])
self.V1line, = axis.plot(self.Vtime, self.V1, marker='o')
self.V2line, = axis.plot(self.Vtime, self.V2, marker='o')
self.V3line, = axis.plot(self.Vtime, self.V3, marker='o')
self.meatline, = axis.plot(self.meattemp[0], self.meattemp[1])
self.canvas = FigureCanvasTkAgg(figure, master)
self.canvas.show()
self.canvas._tkcanvas.pack(side=Tkinter.TOP,fill=Tkinter.BOTH, expand=1)
# Add more GUI stuff here
def calculateTemp(self):
#I = numpy.mean((self.V1[-1]-self.V2[-1])/self.R1,
# (self.V2[-1]-self.V3[-1])/self.R2)
print self.V2[-1], self.V3[-1], self.R2
I = (self.V2[-1]-self.V3[-1])/self.R2
print I, self.V3[-1], self.V3[-1]/I
self.R3 = numpy.append(self.R3, self.V3[-1]/I)
#self.T3.append(self.R3*self.slope + self.yint)
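    # Worked example (hedged, illustrative readings only): with the fixed
    # divider resistor R2 = 1000 ohm set in __init__ and readings V2 = 3.0 V,
    # V3 = 1.5 V, the divider current is I = (V2 - V3) / R2 = 1.5 mA, so the
    # probe resistance appended to R3 is V3 / I = 1.5 / 0.0015 = 1000 ohm.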
def refresh(self):
self.V1line.set_data(self.Vtime, self.V1)
self.V2line.set_data(self.Vtime, self.V2)
self.V3line.set_data(self.Vtime, self.V3)
self.calculateTemp()
out = open('calibration.dat', 'a')
out.write(str(self.Vtime[-1])+" "+str(self.R3[-1])+'\n')
out.close()
self.meatline.set_data(self.meattemp[0], self.meattemp[1])
ax = self.canvas.figure.axes[0]
print 'update: ', self.R3[-1]
try:
ax.set_xlim(min(self.Vtime), max(self.Vtime)+1.0)
ax.set_ylim(0.0, 5.5)
except:
pass
self.canvas.draw()
def addTemp(self, event):
try:
temp = float(self.TempEntry.get())
t = time.time()-self.startTime
            self.meattemp[0] = numpy.append(self.meattemp[0], t)
            self.meattemp[1] = numpy.append(self.meattemp[1], temp)
except:
print "Error!"
def processIncoming(self):
"""
Handle all the messages currently in the queue (if any).
"""
while self.queue.qsize():
try:
msg = self.queue.get(0)
# As a test, we simply print it
#print msg
self.V1 = numpy.append(self.V1, msg[0])
self.V2 = numpy.append(self.V2, msg[1])
self.V3 = numpy.append(self.V3, msg[2])
self.Vtime = numpy.append(self.Vtime, msg[3]-self.startTime)
self.refresh()
except Queue.Empty:
pass
class ThreadedClient:
"""
Launch the main part of the GUI and the worker thread. periodicCall and
endApplication could reside in the GUI part, but putting them here
means that you have all the thread controls in a single place.
"""
def __init__(self, master):
"""
Start the GUI and the asynchronous threads. We are in the main
(original) thread of the application, which will later be used by
the GUI. We spawn a new thread for the worker.
"""
self.master = master
self.Bird = tweetHandler.Tweety()
# Create the queue
self.queue = Queue.Queue()
# Set up the GUI part
self.gui = GuiPart(master, self.queue, self.endApplication)
# Set up the thread to do asynchronous I/O
# More can be made if necessary
self.running = 1
self.thread1 = threading.Thread(target=self.workerThread1)
self.thread1.start()
self.thread2 = threading.Thread(target=self.tweeterThread)
self.thread2.start()
# Start the periodic call in the GUI to check if the queue contains
# anything
self.periodicCall()
def periodicCall(self):
"""
Check every 100 ms if there is something new in the queue.
"""
self.gui.processIncoming()
if not self.running:
# This is the brutal stop of the system. You may want to do
# some cleanup before actually shutting it down.
import sys
sys.exit(1)
self.master.after(1000, self.periodicCall)
def workerThread1(self):
"""
This is where we handle the asynchronous I/O. For example, it may be
a 'select()'.
One important thing to remember is that the thread has to yield
control.
"""
self.serial = serial.Serial(port='/dev/ttyUSB0')
while self.running:
#print self.serial.inWaiting()
if self.serial.inWaiting() != 0:
arduino_data = numpy.array(self.serial.readline().split(),
dtype=float)
msg = arduino_data.tolist()
msg.append(time.time())
self.queue.put(msg)
else:
#print("Waiting")
time.sleep(5)
def tweeterThread(self):
"""
This is where the tweeting is handled.
"""
        pass  # TODO: tweeting via self.Bird (the tweetHandler.Tweety instance) is not implemented yet
def endApplication(self):
self.running = 0
self.serial.close()
| gpl-2.0 |
bert9bert/statsmodels | examples/incomplete/dates.py | 5 | 1262 | """
Using dates with timeseries models
"""
import statsmodels.api as sm
import pandas as pd
# Getting started
# ---------------
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the year.
dates = sm.tsa.datetools.dates_from_range('1700', length=len(data.endog))
# Using Pandas
# ------------
# Make a pandas Series or DataFrame with DatetimeIndex
endog = pd.Series(data.endog, index=dates)
# and instantiate the model
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
# Let's do some out-of-sample prediction
pred = pandas_ar_res.predict(start='2005', end='2015')
print(pred)
# Using explicit dates
# --------------------
ar_model = sm.tsa.AR(data.endog, dates=dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print(pred)
# This just returns a regular array, but since the model has date information
# attached, you can get the prediction dates in a roundabout way.
print(ar_res.data.predict_dates)
# This attribute only exists if predict has been called. It holds the dates
# associated with the last call to predict.
#..TODO: should this be attached to the results instance?
| bsd-3-clause |
annahs/atmos_research | LEO_nonincand_calib_FF.py | 1 | 8737 | #this script is used to fit the full SP2 scattering signal of real particles
#when run for non-incandescent reals this gives a set of data that can be used to set the fixed LEO fit parameters (width and centre position) over time
import sys
import os
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
from pprint import pprint
from scipy.optimize import curve_fit
from scipy import stats
from SP2_particle_record_UTC import ParticleRecord
from struct import *
import hk_new
import hk_new_no_ts_LEO
from scipy import linspace, polyval, polyfit, sqrt, stats
import math
import mysql.connector
from datetime import datetime
#setup
data_dir = 'F:/Alert/2012/SP2B_files/'
start_analysis_at = datetime(2012,4,5)
end_analysis_at = datetime(2012,4,6)
show_full_fit = True
SP2_number = 44
zeroX_evap_threshold = 40
record_size_bytes = 1658 #size of a single particle record in bytes (UBC_SP2 = 1498, EC_SP2 in 2009 and 2010 = 2458, Alert SP2 #44 and #58 = 1658, #17 = 1498)
hk_dict = {
'yag_min':4,
'yag_max':7,
'sample_flow_min':118.5,
'sample_flow_max':121.5,
'sheath_flow_min':990,
'sheath_flow_max':1010,
}
#database connection
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
#**********parameters dictionary**********
parameters = {
'acq_rate': 2500000, #5000000,
}
def make_plot(record):
x_vals = record.getAcqPoints()
y_vals = record.getScatteringSignal()
fit_result = record.FF_results
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(x_vals,y_vals,'o', markerfacecolor='None')
ax1.plot(x_vals,fit_result, 'b')
ax1.plot(record.LF_x_vals_to_use,record.LF_y_vals_to_use, color = 'black',linewidth=3)
#ax1.plot(record.getAcqPoints(), record.getSplitDetectorSignal(), 'o', color ='green')
plt.axvline(x=record.zeroCrossingPos, ymin=0, ymax=1)
plt.axvline(x=record.beam_center_pos, ymin=0, ymax=1, color='red')
plt.show()
def find_nearest(array,value): #get index of value in array closest to value
idx = (np.abs(array-value)).argmin()
return idx
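# Illustrative example (not used below): find_nearest(np.array([10., 20., 30.]), 22.)
# returns 1, the index of the closest entry (20.).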
def gaussFullFit(file,parameters_dict,particle_record_size,show_fit,file_interval,number_of_records,evap_threshold,hk_dictionary,instr_number):
    # parameters used to reject invalid particle records based on scattering peak attributes
min_peakheight = 10
max_peakheight = 3600
min_peakpos = 20
max_peakpos = 160
max_incand = 10
type_particle = 'nonincand'
f = open(file, 'rb')
record_index = 0
multiple_records = []
i=1
while record_index < number_of_records:
record = f.read(particle_record_size)
if record_index == 0 or record_index%file_interval == 0:
try:
particle_record = ParticleRecord(record, parameters_dict['acq_rate'])
except:
print 'corrupt particle record',record_index
raw_input("Press Enter to continue...")
record_index+=1
continue
event_time = particle_record.timestamp #UTC
###### FITTING AND ANALYSIS ########
##Check DB here for bad HK parameters and skip
event_minute = int(event_time-event_time%60)
cursor.execute(('SELECT sample_flow, sheath_flow, yag_power FROM alert_hk_data WHERE SP2_ID =%s AND UNIX_UTC_ts = %s'),(instr_number,event_minute))
hk_data = cursor.fetchall()
if hk_data != []:
sample_flow = hk_data[0][0]
sheath_flow = hk_data[0][1]
yag_power = hk_data[0][2]
if (hk_dictionary['sample_flow_min'] < sample_flow < hk_dictionary['sample_flow_max']) and (hk_dictionary['sheath_flow_min'] < sheath_flow < hk_dictionary['sheath_flow_max']) and (hk_dictionary['yag_min'] < yag_power < hk_dictionary['yag_max']):
#run the scatteringPeakInfo method to retrieve various peak attributes
particle_record.scatteringPeakInfo()
actual_scat_signal = particle_record.getScatteringSignal()
scattering_baseline = particle_record.scatteringBaseline
actual_max_value = particle_record.scatteringMax
actual_max_pos = particle_record.scatteringMaxPos
#run the incandPeakInfo method to retrieve peak height
particle_record.incandPeakInfo()
incand_max = particle_record.incandMax
#check to see if incandescence is negligible, scattering signal is over threshold, is in a reasonable position, and no double peaks
if incand_max < max_incand and actual_max_value > min_peakheight and actual_max_value < max_peakheight and actual_max_pos > min_peakpos and actual_max_pos < max_peakpos:
#check zero crossing posn
#note: zero-crossing calc will depend on the slope of the zero-crossing from the split detector
try:
zero_crossing_pt = particle_record.zeroCrossingNegSlope(evap_threshold)
except:
continue
if zero_crossing_pt > 0:
#check for a double peak
try:
particle_record.isSingleParticle()
except:
print record_index
print actual_max_value
if particle_record.doublePeak==False:
particle_record.fullGaussFit()
fit_peak_pos = particle_record.FF_peak_pos
fit_width = particle_record.FF_width
fit_scattering_amp = particle_record.FF_scattering_amp
zero_cross_to_peak = (zero_crossing_pt - fit_peak_pos)
####
add_data = ('INSERT INTO alert_leo_params_from_nonincands'
'(UNIX_UTC_ts, sp2b_file, file_index, instrument_ID, particle_type, actual_scat_amp,FF_scat_amp,FF_peak_posn,FF_gauss_width,actual_zero_x_posn)'
'VALUES (%(UNIX_UTC_ts)s,%(sp2b_file)s,%(file_index)s,%(instrument_ID)s,%(particle_type)s,%(actual_scat_amp)s,%(FF_scat_amp)s,%(FF_peak_posn)s,%(FF_gauss_width)s,%(actual_zero_x_posn)s)')
if np.isnan(np.sum([actual_max_value,fit_scattering_amp,fit_peak_pos,fit_width,zero_crossing_pt])) == False: #chekc for nans
single_record ={
'UNIX_UTC_ts' :event_time,
'sp2b_file' : file,
'file_index' : record_index,
'instrument_ID' :instr_number,
'particle_type' :type_particle,
'actual_scat_amp' : float(actual_max_value),
'FF_scat_amp' : float(fit_scattering_amp),
'FF_peak_posn' : float(fit_peak_pos),
'FF_gauss_width': float(fit_width),
'actual_zero_x_posn': float(zero_crossing_pt),
}
multiple_records.append((single_record))
#bulk insert to db table
if i%4000 == 0:
#cursor.executemany(add_data, multiple_records)
#cnx.commit()
multiple_records = []
##increment count of detectible incandescent particles
i+= 1
#plot particle fit if desired
if show_full_fit == True:
print record_index, fit_width, zero_cross_to_peak
#print data['actual_scat_amp'],fit_scattering_amp, fit_peak_pos
print '\n'
make_plot(particle_record)
# else:
# print 'double_peak'
# print '\n'
# #make_plot(particle_record)
# else:
# print 'zero-x ', zero_crossing_pt
# print '\n'
# #make_plot(particle_record)
# else:
# print record_index, 'incand ', incand_max, 'scat_max ', actual_max_value, 'scat_pos ', actual_max_pos
# print '\n'
# #make_plot(particle_record)
record_index+=1
#bulk insert of remaining records to db
#if multiple_records != []:
# cursor.executemany(add_data, multiple_records)
# cnx.commit()
f.close()
os.chdir(data_dir)
for directory in os.listdir(data_dir):
if os.path.isdir(directory) == True and directory.startswith('20'):
parameters['folder']= directory
folder_date = datetime.strptime(directory, '%Y%m%d')
if folder_date >= start_analysis_at and folder_date < end_analysis_at:
parameters['directory']=os.path.abspath(directory)
os.chdir(parameters['directory'])
number_of_sp2b_files = len([name for name in os.listdir('.') if (name.endswith('.sp2b') and name.endswith('gnd.sp2b') == False)])
file_interval = number_of_sp2b_files*1
print file_interval
for file in os.listdir('.'):
if file.endswith('.sp2b') and (file.endswith('gnd.sp2b')==False):
print file
path = parameters['directory'] + '/' + str(file)
file_bytes = os.path.getsize(path) #size of entire file in bytes
record_size = record_size_bytes
number_of_records = (file_bytes/record_size)-1
gaussFullFit(file,parameters,record_size,show_full_fit,file_interval,number_of_records,zeroX_evap_threshold,hk_dict,SP2_number)
os.chdir(data_dir)
cnx.close()
| mit |
bvacaliuc/pyrasdr | plugins/pyLMS7002M/examples/VNA/calculateVNA.py | 2 | 4932 | import numpy
import os, sys
if len(sys.argv)!=2 and len(sys.argv)!=3:
print("Usage: python calculateVNA.py measurementName [plotFigures]")
print("plotFigures is optional and can have values plot for plotting figures or save to plot and save the figures")
exit(1)
startFreq = 2.3e9
endFreq = 2.6e9
nPoints = 101
measName = sys.argv[1]
saveFig = False
plotFig = False
if len(sys.argv)==3:
if sys.argv[2]=="plot":
plotFig = True
if sys.argv[2]=="save":
plotFig = True
saveFig = True
if plotFig:
from matplotlib.pyplot import *
import smithplot
from smithplot.smithaxes import update_scParams
import warnings
warnings.filterwarnings("ignore", module="matplotlib")
fMin=startFreq
fMax=endFreq
#############################################################################
# Auxiliary functions
def readFile(fileName, fMin=0, fMax=1e20):
inFile = open(fileName, 'r')
res = [ [], [], [] ]
for line in inFile:
if line[0]=="#":
continue
line = line.strip()
if line=="":
continue
tmp = line.split()
if float(tmp[0])<fMin or float(tmp[0])>fMax:
continue
res[0].append( float(tmp[0]) )
res[1].append( float(tmp[1]) )
res[2].append( float(tmp[2]) )
inFile.close()
return [ numpy.array(res[0]), numpy.array(res[1]), numpy.array(res[2]) ]
def filterData(f, data, fMin=0, fMax=1e20):
res = []
for i in range(0, len(f)):
if f[i]>fMin and f[i]<fMax:
res.append(data[i])
return numpy.array(res)
def unwrap(phase):
# Unwrap the phase
phase_unwrap = []
intAng = 0
for i in range(0,len(phase)-1):
phase_unwrap.append(phase[i]+intAng)
dph = phase[i+1]-phase[i]
if abs(dph)>150:
if dph>0:
intAng += -180
else:
intAng += 180
phase_unwrap.append(phase[-1]+intAng)
phase_unwrap=numpy.array(phase_unwrap)
# Second pass
phase_unwrap2 = []
intAng = 0
for i in range(0,len(phase_unwrap)-1):
phase_unwrap2.append(phase_unwrap[i]+intAng)
dph = phase_unwrap[i+1]-phase_unwrap[i]
if dph<-150:
phase_unwrap[i+1] += 180
if dph>10 and dph<45:
intAng += -45
if dph>45 and dph<135:
intAng += -90
if dph>135 and dph<180:
intAng += -180
if dph>180:
intAng += -360
phase_unwrap2.append(phase_unwrap[-1]+intAng)
phase_unwrap2=numpy.array(phase_unwrap2)
return phase_unwrap2
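# Editorial note (hedged): for plain 2*pi-discontinuity unwrapping, numpy's built-in
# could likely be used instead, e.g. numpy.degrees(numpy.unwrap(numpy.radians(phase)));
# the hand-rolled routine above is kept because its second pass also corrects the
# 45/90/180-degree steps seen in these measurements.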
#############################################################################
# Read the measurement resutls
dutFileName = 'vna_'+measName+'_DUT_'+str(startFreq)+'_'+str(endFreq)+'_'+str(nPoints)+'.txt'
shortFileName = 'vna_'+measName+'_short_'+str(startFreq)+'_'+str(endFreq)+'_'+str(nPoints)+'.txt'
dutData = readFile(dutFileName)
shortData = readFile(shortFileName)
freq = dutData[0]
f = freq
dutPhase = unwrap(dutData[2]) * numpy.pi/180
shortPhase = unwrap(shortData[2]) * numpy.pi/180
if plotFig:
plot(numpy.array(f)/1e9,shortPhase)
xlabel('f [GHz]')
ylabel('Short Phase')
grid()
show()
plot(numpy.array(f)/1e9,dutPhase, linewidth=1.5, aa=True)
xlabel('f [GHz]')
ylabel('DUT Phase')
grid()
show()
dutData[1] = 20.0*numpy.log10(dutData[1])
shortData[1] = 20.0*numpy.log10(shortData[1])
measGammaDut = numpy.power(10.0, dutData[1]/20) * numpy.exp(1j * dutPhase)#/2)
measGammaShort = numpy.power(10.0, shortData[1]/20) * numpy.exp(1j * shortPhase)#/2)
GammaShort = numpy.array([-1.0] * len(f)) # assume ideal short
returnLoss = dutData[1] - shortData[1]
vswr = (10**(-returnLoss/20)+1)/(10**(-returnLoss/20)-1)
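# Worked example (illustrative): a return loss of -20 dB relative to the short gives
# 10**(-returnLoss/20) = 10, hence VSWR = (10 + 1) / (10 - 1) ~= 1.22; equivalently
# |Gamma| = 0.1, i.e. about 1% of the incident power is reflected by the DUT.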
if plotFig:
plot(numpy.array(f)/1e9,vswr)
title('VSWR')
xlabel('f [GHz]')
ylabel('VSWR')
grid()
show()
gammaMag = numpy.power(10.0, returnLoss/20.0)
dutPhase = dutPhase - shortPhase + numpy.pi
GammaDut = gammaMag * numpy.exp(1j * dutPhase)
ZDut = (1.0 + GammaDut)/(1.0 - GammaDut)
if plotFig:
plot(numpy.array(f)/1e9, 20*numpy.log10( numpy.abs(GammaDut)), color='b', linewidth=1.5, aa=True)
xlabel('f [GHz]')
ylabel('S11 [dB]')
grid()
if saveFig:
savefig(measName+'_s11.png')
show()
figure(figsize=(24, 16))
subplot(1, 1, 1, projection='smith',grid_major_fancy=True,
grid_minor_fancy=True, plot_hacklines=True)
ZDutPlot = filterData(freq, ZDut, fMin, fMax)
plot(ZDutPlot, markevery=1, label="S11", color='b', linewidth=1.5, aa=True)
if saveFig:
savefig(measName+'_smith.png')
show()
outFileName = measName + '.s1p'
outFile = open(outFileName, 'w')
outFile.write('# Hz S RI R 50\n')
for i in range(0, len(GammaDut)):
outFile.write(str(f[i])+"\t"+str(numpy.real(GammaDut[i]))+"\t"+str(numpy.imag(GammaDut[i]))+"\n")
outFile.close()
| gpl-3.0 |
aewhatley/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the last 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
rbiswas4/simlib | opsimsummary/simlib.py | 1 | 28868 | #!/usr/bin/env python
"""
Module with functionality to represent SNANA simlib data.
"""
from __future__ import division, print_function, unicode_literals
__all__ = ['SimlibMixin', 'Simlibs']
import os
import numpy as np
import subprocess
from io import StringIO, BytesIO
from collections import OrderedDict
import pandas as pd
from .summarize_opsim import SynOpSim
class SimlibField(object):
def __init__(self, fieldID=None, ra=None, dec=None, opsimtable=None,
mwebv=0.0):
self.fieldID = fieldID
self.ra = ra
self.dec = dec
self.mwebv = mwebv
self.opsimtable = opsimtable
def setfields(self, fieldID, ra, dec, opsimtable, mwebv=None):
        if mwebv is None:
            mwebv = self.mwebv
        self.fieldID = fieldID
        self.ra = ra
        self.dec = dec
        self.mwebv = mwebv
        self.opsimtable = opsimtable
class SimlibMixin(object):
"""
Mixin for `SummaryOpsim` to provide the following additional functionality
geared towards creating simlibs for SNANA.
- Calculate additional columns for simlib either on a complete
`OpSimOutput.summary` dataframe, or for a dataframe for a particular
`patch` of sky (LIBID in the SNANA language).
- Calculate variables required for SNANA simlib outside the Opsim data
The parent class must have the following attributes:
- subset (must be a valid string)
The following attributes cannot be set by the user and are
set by `simlibVars`
- user (default can be None)
- host (default can be None)
- telescope
- survey
- pixelSize
In order to be able to write out the simlibs to disk, it should also have
a method to provide a sequence (may be a generator) of fields, and
`opsimtables`. The fields are instances of a class which has the following
information
`fieldID`
`ra`
`dec`
`mwebv` (which may be set to a default value). The responsibility of
selection of such fields and sorting out the correct requirements is
of the parent class.
"""
# Bad patch
pixelSize = 0.2
@property
def simlibVars(self):
"""
Collection of Attributes provided by the static method
`self.get_simlibVars` as an ordered dict with the following keys
(`user`, `host`, `pixelSize`, `survey`, `telescope`). Calling this.
also sets class variables for each of these keys.
"""
simlibVars = self.get_simlibVars(user=self.user,
host=self.host,
pixelSize=self.pixelSize,
telescope=self.telescope,
survey=self.survey)
self.user = simlibVars['user']
self.host = simlibVars['host']
self.pixelSize = simlibVars['pixelSize']
self.telescope = simlibVars['telescope']
self.survey = simlibVars['survey']
return simlibVars
@staticmethod
def get_simlibVars(user=None, host=None, pixelSize=0.2, telescope='LSST',
survey='LSST'):
""" Computes quantities only necessary for SNANA Simlib Calculation
Parameters
----------
user: string, optional, defaults to None
user running the program, used in writing out SNANA simlibs only
if None, the login name of the user is used.
host: string, optional, defaults to None
name of host machine, used only in writing out SNANA simlibs
default of None assigns the output of `hostname` to this variable.
pixelSize: float, optional, defaults to 0.2
size of the pixel in arcseconds, defaults to 0.2 as appropriate
for LSST
survey: string, optional, defaults to 'LSST'
name of survey, required only for writing out SNANA simlibs
telescope: string, optional, defaults to 'LSST'
name of survey, required only for writing out SNANA simlibs
"""
# report a user name, either from a constructor parameter, or login name
if user is None:
user = os.getlogin()
# report a host on which the calculations are done. either from
# constructor parameters or from the system hostname utility
if host is None:
proc = subprocess.Popen('hostname', stdout=subprocess.PIPE)
host, err = proc.communicate()
x = (('user', user),
('host', host),
('pixelSize', pixelSize),
('survey', survey),
('telescope', telescope))
return OrderedDict(x)
def _capitalizeY(self, x):
"""private helper method
"""
# SNANA has y filter deonoted as Y. Can change in input files to SNANA
# but more bothersome.
if 'y' in x:
return u'Y'
else:
return x
def preprocess_lib(self, opsimtable):
"""
preprocess the dataframe with data for a single SNANA simlib
field (ie. data corresponding to a single libid) if necessary.
Parameters
----------
opsimtable : `pd.DataFrame` with required data from OpSim corresponding
to a single field.
"""
# reasonable guess that columns have not been added
if 'simLibSkySig' not in opsimtable.columns:
df = self.add_simlibCols(opsimtable, pixelSize=self.pixelSize)
df['filter'] = list(map(self._capitalizeY, opsimtable['filter']))
else:
df = opsimtable
return df
@staticmethod
def add_simlibCols(opsimtable, pixelSize=0.2):
"""
Parameters
----------
opsimtable: `~pandas.DataFrame` object, mandatory
containing an opsim Output of version X. The main requirements here
are that the columns 'finSeeing', 'fiveSigmaDepth', and
'filtSkyBrightness' are defined. If the opsim output has differently
named variables or transformed variables, these should be changed to
meet the criteria.
pixelSize: float, units of arc sec, defaults to LSST value of 0.2
pixel Size
Returns
-------
DataFrame with additional columns of 'simLibPsf', 'simLibZPTAVG', and
'simLibSkySig'
.. note :: This was written from a piece of f77 code by David
Cinabro sent by email on May 26, 2015.
"""
if 'finSeeing' in opsimtable.columns:
psfwidth = 'finSeeing'
else:
psfwidth = 'FWHMeff'
opsim_seeing = opsimtable[psfwidth] # unit of arc sec sq
# magsky is in units of mag/arcsec^2
# opsim_maglim is in units of mag
opsim_maglim = opsimtable['fiveSigmaDepth']
opsim_magsky = opsimtable['filtSkyBrightness']
# Calculate two variables that come up in consistent units:
# term1 = 2.0 * opsim_maglim - opsim_magsky
# Area of pixel in arcsec squared
pixArea = pixelSize * pixelSize
term1 = 2.0 * opsim_maglim - opsim_magsky # * pixArea
# term2 = opsim_maglim - opsim_magsky
term2 = - (opsim_maglim - opsim_magsky) # * pixArea
# Calculate SIMLIB PSF VALUE
opsimtable['simLibPsf'] = opsim_seeing /2.35 /pixelSize
# 4 \pi (\sigma_PSF / 2.35 )^2
area = (1.51 * opsim_seeing)**2.
opsim_snr = 5.
arg = area * opsim_snr * opsim_snr
# Background dominated limit assuming counts with system transmission only
# is approximately equal to counts with total transmission
zpt_approx = term1 + 2.5 * np.log10(arg)
# zpt_approx = 2.0 * opsim_maglim - opsim_magsky + 2.5 * np.log10(arg)
# ARG again in David Cinabro's code
val = -0.4 * term2
# val = -0.4 * (opsim_magsky - opsim_maglim)
tmp = 10.0 ** val
# Additional term to account for photons from the source, again assuming
# that counts with system transmission approximately equal counts with total
# transmission.
zpt_cor = 2.5 * np.log10(1.0 + 1.0 / (area * tmp))
simlib_zptavg = zpt_approx + zpt_cor
# ZERO PT CALCULATION
opsimtable['simLibZPTAVG'] = simlib_zptavg
# SKYSIG Calculation
npix_asec = 1. / pixelSize**2.
opsimtable['simLibSkySig'] = np.sqrt((1.0 / npix_asec) \
* 10.0 **(-0.4 * (opsim_magsky - simlib_zptavg)))
return opsimtable
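    # Hedged usage sketch (the column values below are illustrative, not real data):
    #   df = pd.DataFrame(dict(FWHMeff=[0.8], fiveSigmaDepth=[24.5],
    #                          filtSkyBrightness=[21.0]))
    #   df = SimlibMixin.add_simlibCols(df, pixelSize=0.2)
    # The returned frame gains the 'simLibPsf', 'simLibZPTAVG' and 'simLibSkySig'
    # columns used when formatting the 'S:' lines of the simlib.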
def fieldheader(self, fieldID, ra, dec, opsimtable, mwebv=0.0,
fieldtype=None):
"""
Parameters
----------
fieldID : int
integer for the unique field ID
ra : float, degrees
ra of the field location
dec : float, degrees
dec of the field location
opsimtable : `np.array` of `pd.DataFrame`
sequence of OpSim observations in above format to find number of
observations.
mwebv : float, defaults to 0.0
milky way E(B-v) value. This is usually recomputed in SNANA
depending on flags, and hence can be left as 0.0
fieldtype : string, defaults to None
string used to construct `Field: fieldtype` line, if None this
line is left out.
"""
nobs = len(opsimtable)
# String formatting
s = '# --------------------------------------------' +'\n'
s += 'LIBID: {0:10d}'.format(fieldID) +'\n'
if fieldtype is not None:
s += 'Field: {}\n'.format(fieldtype)
tmp = 'RA: {0:+10.6f} DECL: {1:+10.6f} NOBS: {2:10d} MWEBV: {3:5.2f}'
tmp += ' PIXSIZE: {4:5.3f}'
s += tmp.format(ra, dec, nobs, mwebv, self.pixelSize) + '\n'
# s += 'LIBID: {0:10d}'.format(fieldID) + '\n'
s += '# CCD CCD PSF1 PSF2 PSF2/1' +'\n'
s += '# MJD ID*NEXPOSE FLT GAIN NOISE SKYSIG (pixels) RATIO ZPTAVG ZPTERR MAG' + '\n'
return s
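    # The returned header renders roughly as follows (values illustrative):
    #   # --------------------------------------------
    #   LIBID:          1
    #   RA: +53.000000 DECL: -27.500000 NOBS:        120 MWEBV:  0.00 PIXSIZE: 0.200
    #   #                      CCD  CCD         PSF1 PSF2 PSF2/1
    #   #     MJD   ID*NEXPOSE FLT GAIN NOISE SKYSIG (pixels) RATIO ZPTAVG ZPTERR MAG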
@staticmethod
def fieldfooter(fieldID):
s = 'END_LIBID: {0:10d}'.format(fieldID)
s += '\n'
return s
def formatSimLibField(self, fieldID, opsimtable, sep=' '):
opsimtable = self.preprocess_lib(opsimtable)
y = ''
for row in opsimtable.iterrows():
data = row[1] # skip the index
lst = ['S:',
"{0:5.4f}".format(data.expMJD),
"{0:10d}*2".format(data.obsHistID),
data['filter'],
"{0:5.2f}".format(1.), # CCD Gain
"{0:5.2f}".format(0.25), # CCD Noise
"{0:6.2f}".format(data.simLibSkySig), # SKYSIG
"{0:4.2f}".format(data.simLibPsf), # PSF1
"{0:4.2f}".format(0.), # PSF2
"{0:4.3f}".format(0.), # PSFRatio
"{0:6.2f}".format(data.simLibZPTAVG), # ZPTAVG
"{0:6.3f}".format(0.005), # ZPTNoise
"{0:+7.3f}".format(-99.)] # MAG
s = sep.join(lst)
y += s + '\n'
return y
def simlibFieldasString(self, fh, fieldID, ra, dec, opsimtable,
mwebv=0.0, fieldtype=None):
opsimtable = opsimtable.reset_index()
#raise NotImplementedError("Has not been checked")
# Write out the header for each field
s = self.fieldheader(fieldID, ra, dec, opsimtable,
mwebv=mwebv, fieldtype=fieldtype)
# Write out the actual field
s += self.formatSimLibField(fieldID, opsimtable, sep=' ')
# Write out the footer for each field
s += self.fieldfooter(fieldID)
return s
def simLibheader(self, numLibId=None, saturation_flag=1024,
comments='\n'):
"""
return a string that is the header of the simlib file
Parameters
----------
numLibId : int, defaults to None
number of libids in simlib
saturation_flag : int, defaults to 1024
value desired as saturation flag
comments: string, defaults to `\n`
comments passed on to `simlib` output
"""
sv = self.simlibVars
user = sv['user']
host = sv['host'].splitlines()[0]
# The decode lines below do the correct thing in py3
# However the isinstance line does not, needs fixing
# if isinstance(host, unicode):
# host = host.decode('utf-8')
telescope = sv['telescope']
survey = sv['survey']
# comment: I would like to generalize ugrizY to a sort but am not sure
# of the logic for other filter names. so ducking for now
s = 'SURVEY: {0:} FILTERS: ugrizY TELESCOPE: {1:}\n'.format(survey,
telescope)
s += 'USER: {0:} HOST: {1}\n'.format(user, host)
if numLibId is not None:
s += 'NLIBID: {}\n'.format(numLibId)
s += 'NPE_PIXEL_SATURATE: 100000\n'
s += 'PHOTFLAG_SATURATE: {0}\n'.format(saturation_flag)
s += comments + '\n'
s += 'BEGIN LIBGEN\n'
return s
def simLibFooter(self, numFields):
"""
"""
s = 'END_OF_SIMLIB: {0:10d} ENTRIES'.format(numFields)
return s
def writeSimlib(self, filename, fields, comments='\n',
fieldtype=None, mwebv=0., numLibId=None):
num_fields = 0
with open(filename, 'w') as fh:
# Write out the header to the simlib file
simlib_header = self.simLibheader(numLibId=numLibId, comments=comments)
fh.write(simlib_header)
# Now write the actual simlib data to file
for field in fields:
# obtain the set of field dependent parameters from `SynOpSim`
fieldID = field.fieldID
ra = field.ra
dec = field.dec
mwebv = field.mwebv
opsimtable = field.opsimtable
fh.write(self.simlibFieldasString(self, num_fields, ra, dec,
opsimtable, mwebv=mwebv,
fieldtype=fieldtype))
# Write out the header for each field
# fh.write(self.fieldheader(num_fields, ra, dec, opsimtable,
# mwebv=mwebv))
# fh.write(self.formatSimLibField(fieldID, opsimtable))
# Write out the footer for each field
# fh.write(self.fieldfooter(fieldID))
num_fields += 1
# Now write out the footer to the entire simlib file
simlib_footer = self.simLibFooter(num_fields)
fh.write(simlib_footer)
return num_fields
class Simlibs(SynOpSim, SimlibMixin):
"""A class to write out simlibs to disk
"""
pixelSize = 0.2
host = None
user = None
telescope = 'LSST'
survey = 'LSST'
def simlibs_for_fields(self, surveyPix, mwebv=0.):
"""Generator for simlib fields for a sequence of fields
defined in a dataFrame called `surveyPix`. The dataFrame
`surveyPix` must have the following columns `simlibId`,
`ra`, `dec` and must be sorted in increasing order of
`simlibId`.
Parameters
----------
surveyPix : `pd.dataFrame`
with the following columns `simlibId`, `ra`, `dec`
mwebv : `np.float` defaults to 0.
A default value for the MW extinction
Returns
-------
a generator of fields for the simlib file
"""
surveyPix = surveyPix.reset_index().query('simlibId > -1').set_index('simlibId')
ra = surveyPix.ra.values
dec = surveyPix.dec.values
pts = self.pointingsEnclosing(ra, dec, circRadius=0.,
pointingRadius=1.75,
usePointingTree=True)
field = SimlibField()
for i, fieldID in enumerate(surveyPix.reset_index().simlibId.values):
field.setfields(fieldID, ra[i], dec[i],
next(pts).sort_values(by='expMJD'), mwebv=mwebv)
yield field
def get_surveyPix(self, surveydf, numFields=15, rng=np.random.RandomState(0)):
""" Get a random selection of survey pixels observed that have numbers
of visits in between the min and max visits.
Parameters
----------
surveydf : `pd.DataFrame`
a pandas dataframe with a collection of selected fields with at
least the the following columns a unique index `hid` for each
field, an `ra`, and a `dec`
numFields : integer, defaults to 15
number of samples of fields desired.
rng : instance of `np.random.RandomState`, defualts to using 0 as seed
a random state.
Returns
-------
a dataframe with at least `hid` the original index for each field, `ra`,
`dec`, and `simlibID` sorted by `simlibId`. This dataframe contains a
mapping from the new index `simlibId` to the old index `hid`
"""
surveydf['simlibId'] = -1
if numFields <= len(surveydf):
surveydf = surveydf.sample(n=numFields, replace=False,
random_state=rng)
# hids = rng.choice(surveydf.reset_index()['hid'].values, size=numFields,
# replace=False)
        else:
            surveydf = surveydf.sample(n=numFields, replace=True,
                                       random_state=rng)
            print("Warning: You have asked for more samples than the original number of fields")
            print('Sampling fields with replacement to reach the requested number')
hids = surveydf.reset_index()['hid'].values
        surveydf = surveydf.reset_index().set_index('hid')
surveydf.loc[hids, 'simlibId'] = np.arange(len(hids))
return surveydf
def randomSimlibs(self, numFields=50, fname='test.simlib',
rng=np.random.RandomState(1), outfile=None,
mapping_outfile='mapping.csv', mwebv=0.,
fieldtype=None, minVisits=1):
if fieldtype is None:
fieldtype = self.subset.upper()
if outfile is None:
outfile = fname + '.hdf'
fields = self.sampleRegion(numFields=numFields, rng=rng,
outfile=outfile, subset=self.subset,
minVisits=minVisits, nside=256,
mwebv=mwebv)
num_fields = self.writeSimlib(fname, fields, fieldtype=fieldtype, mwebv=mwebv)
fields = self.sampleRegion(numFields=numFields, rng=rng,
outfile=outfile, subset=self.subset)
df = pd.DataFrame(dict(SNANAID=np.arange(num_fields),
healpixID=list(field.fieldID for field in fields
)))
df.to_csv(mapping_outfile)
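# Hedged usage sketch: given a Simlibs instance `simlibs` built from an OpSim summary
# (construction not shown in this module; file names below are illustrative):
#   simlibs.randomSimlibs(numFields=50, fname='ddf.simlib',
#                         mapping_outfile='ddf_mapping.csv')
# This writes an SNANA simlib for 50 randomly chosen observed sky patches plus a CSV
# mapping the sequential SNANA LIBIDs back to the original healpix IDs.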
class Simlib(object):
def __init__(self, simlibDict, simlibMetaData=None):
self.simlibDict = simlibDict
self.fieldIDs = self.simlibDict.keys()
if simlibMetaData is not None:
self.meta = simlibMetaData
@classmethod
def fromSimlibFile(cls, simlibFileName):
'''
Constructor for class using an ASCII
Parameters
----------
simlibFileName: string, mandatory
absolute path to SNANA simlib file
Returns
-------
dictionary with LIBIDs (fieldIDs) of type int as keys, and the
corresponding FieldSimlib objects as values
Examples
--------
>>> sl = Simlib.fromSimlibFile(simlibFileName)
'''
file_header, file_data, file_footer = cls.read_simlibFile(simlibFileName)
mydict = cls.getSimlibs(file_data)
meta = cls.simlibMetaData(file_header)
cls = cls(simlibDict=mydict, simlibMetaData=meta)
cls.validate(file_footer)
return cls
def validate(self, file_footer):
'''
'''
numberlist = list(filter(lambda x: x.isdigit(), file_footer.split()))
if len(numberlist) !=1:
raise ValueError('There should only be one integer in the footer')
numLibId = int(numberlist[0])
if numLibId != len(self.fieldIDs):
            raise ValueError('The number of fieldIDs in the simlib does not match the number stated in the footer')
return
@staticmethod
def simlibMetaData(simlibHeader):
'''
parse the string corresponding to the header of a SNANA simlib file to
get the simlib MetaData, stored in the form of a string valued
dictionary with the following keys:
'SURVEY', 'FILTERS', 'HOST', 'USER', 'COMMENT'
Parameters
----------
simlibHeader: string corresponding the header of an SNANA simlib file as
parsed by cls.read_simlibFile.
Returns
-------
dictionary of keys above and values.
'''
comments = []
fields = []
lines = simlibHeader.split('\n')
for line in lines:
if line.startswith('COMMENT') or line.startswith('#'):
comments.append(line)
else:
fields.append(line)
ss = ' '.join(fields)
words = ss.split()
keys = list(map(lambda x: x[:-1], words[0::2]))
vals = words[1::2]
if len(keys) != len(vals):
            raise ValueError('the number of fields in dict should match vals')
meta = dict(zip(keys, vals))
meta['COMMENTS'] = '\n'.join(comments)
return meta
def simlibData(self, fieldID):
return self.simlibDict[fieldID].data
@classmethod
def getSimlibs(cls, file_data):
# def getSimlibs(cls, simlibFile):
# file_header, file_data, file_footer = cls.read_simlibFile(simlibFile)
simlibStrings = cls.split_simlibStrings(file_data)
mydict = dict()
for strings in simlibStrings:
s = FieldSimlib.fromSimlibString(strings)
mydict[s.fieldID] = s
return mydict
@staticmethod
def read_simlibFile(simlibfile):
# slurp into a string
with open(simlibfile) as f:
ss = f.read()
# split into header, footer and data
fullfile = ss.split('BEGIN LIBGEN')
file_header = fullfile[0]
if 'END_OF_SIMLIB' in ss:
data, footer = fullfile[1].split('END_OF_SIMLIB')
else:
data = fullfile[1]
footer = ''
return file_header, data, footer
@staticmethod
def split_simlibStrings(simlibStrings):
simlibs = simlibStrings.split('\nLIBID')[1:]
simlibs = map(lambda x: 'LIBID' + x.split('# -')[0], simlibs)
return simlibs
class FieldSimlib(object):
"""
Class to hold data corresponding to a particular fieldID (LIBID) of a
SNANA SIMLIB file and methods. The fieldSimlib class for a particular field
has the following attributes, and may be instantiated by supplying these,
or can be conveniently constructed from the string corresponding to the
description of this data in an SNANA simlib file using the constructor
fromSimlibString:
Parameters
----------
simlibdata : a pandas dataFrame
simlib_meta : a dictionary
Attributes
----------
fieldID : int
a unique integer identifying the field of view. Different pointings
of the same field at different times (but possibly with dithers) are
associated with the same fieldID
meta : dict
metadata associated with the field, which has at least the following
keys:
LIBID, RA, DECL, MWEBV, NOBS, PIXSIZE
data : `~pd.DataFrame` object with the observations and having at least the
following columns: 'MJD', 'IDEXPT', 'FLT', 'GAIN', 'NOISE', 'SKYSIG',
'PSF1', 'PSF2', 'PSFRatio', 'ZPTAVG', 'ZPTERR', 'MAG']. The meanings of
these columns are discussed in the SNANA manual in the sub-section
'The 'SIMLIB' Observing file (4.7)
"""
def __init__(self, simlibdata, simlib_meta):
"""
Instantiate the class from the basic data
"""
self.data = simlibdata
self.meta = simlib_meta
self.fieldID = self.meta['LIBID']
@classmethod
def fromSimlibString(cls, simlibstring):
'''
Basic constructor method to take a string corresponding to a
field simlib data corresponding to a single LIBID and parse it to
metadata containing the properties of the field, a
`~pandas.DataFrame` containing the data, and the string after the
data
Parameters
----------
simlibstring : string, mandatory
'''
# split into three parts
header, data, footer = cls.split_simlibString(simlibstring)
# create the DataFrame
clsdata = cls.simlibdata(data)
# parse header to get header metadata and header fields
header_metadata, header_fields = cls.split_header(header)
clsmeta = cls.libid_metadata(header_metadata)
# Instantiate the class and make sure it works
myclass = cls(simlibdata=clsdata, simlib_meta=clsmeta)
myclass.validate(footer)
return myclass
def validate(self, validate_string):
"""
Validate the interpretation of the field simlib data from a field simlib
string by checking 1. the LIBID at the end of the string matches the
        one at the beginning (i.e. somehow multiple fields have not been read in)
2. the number of rows of the data for this field simlib matches the
number of observations recorded in the metadata as NOBS
Parameters
----------
validate_string : string, mandatory
footer obtained by splitting the simlib corresponding to the field
usually of the form
"""
val = eval(validate_string.split()[-1])
if int(self.meta['LIBID']) != val:
print('LIBID value at beginning: ', self.meta['LIBID'])
print('LIBID value at the end', val)
raise ValueError('the LIBID values do not match')
if len(self.data) != self.meta['NOBS']:
print('NOBS :', self.meta['NOBS'])
print('len(data) :', len(self.data))
            raise ValueError('the number of observations recorded does not '
                             'match the size of the data')
@staticmethod
def split_simlibString(simlibString):
'''
split the string corresponding to a simlib file into header, footer,
and data pieces
Parameters
----------
simlibString : string
'''
lst = simlibString.split('MAG')
header = lst[0]
data, val = lst[1].split('END_LIBID')
index = data.index('\n')
return header, data[index+1:], val
@staticmethod
def simlibdata(data):
'''
manipulate string in the simlibstring to form pandas DataFrame object
Parameters
----------
data : data
'''
fhandle = StringIO(data)
        df = pd.read_csv(fhandle, delimiter=r"\s+",
names=['trash', 'MJD', 'IDEXPT', 'FLT', 'GAIN',
'NOISE', 'SKYSIG', 'PSF1', 'PSF2',
'PSFRatio', 'ZPTAVG', 'ZPTERR', 'MAG'])
del df['trash']
return df
@staticmethod
def split_header(header):
'''
split header string into metadata and field names
Parameters
----------
header : header
'''
lines = header.split('\n')
header_metadata = []
header_fields = []
for line in lines:
if line.startswith('#'):
header_fields.append(line[1:])
else:
header_metadata += line.split()
return header_metadata, header_fields
@staticmethod
def libid_metadata(header_metadata):
'''
parse header metadata string into a dictionary
Parameters
----------
header_metadata : header
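        Examples
        --------
        A small sketch with an illustrative token list (keys end in ':'):

        >>> d = FieldSimlib.libid_metadata(['LIBID:', '519', 'RA:', '30.5'])
        >>> d['LIBID'], d['RA']
        (519, 30.5)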
'''
# Even index values 0, 2, 4 are keys
# remove ':' char at end
keys = list(map(lambda x: x[:-1], header_metadata[0::2]))
# odd index values are floats or ints
vals = list(map(eval, header_metadata[1::2]))
return dict(zip(keys, vals))
| mit |
ningchi/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 254 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
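        # Added comment: the negative row index (-iy) below appears to flip
        # the latitude axis, on the assumption that the coverage rasters are
        # stored north-to-south while ygrid increases south-to-north.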
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
bsipocz/statsmodels | statsmodels/tsa/arima_process.py | 26 | 30878 | '''ARMA process and estimation with scipy.signal.lfilter
2009-09-06: copied from try_signal.py
reparameterized same as signal.lfilter (positive coefficients)
Notes
-----
* pretty fast
* checked with Monte Carlo and cross comparison with statsmodels yule_walker
for AR numbers are close but not identical to yule_walker
not compared to other statistics packages, no degrees of freedom correction
* ARMA(2,2) estimation (in Monte Carlo) requires longer time series to estimate parameters
without large variance. There might be different ARMA parameters
with similar impulse response function that cannot be well
distinguished with small samples (e.g. 100 observations)
* good for one time calculations for entire time series, not for recursive
prediction
* class structure not very clean yet
* many one-liners with scipy.signal, but takes time to figure out usage
* missing result statistics, e.g. t-values, but standard errors in examples
* no criteria for choice of number of lags
* no constant term in ARMA process
* no integration, differencing for ARIMA
* written without textbook, works but not sure about everything
briefly checked and it looks to be standard least squares, see below
* theoretical autocorrelation function of general ARMA
Done, relatively easy to guess solution, time consuming to get
theoretical test cases,
example file contains explicit formulas for acovf of MA(1), MA(2) and ARMA(1,1)
* two names for lag polynomials ar = rhoy, ma = rhoe ?
Properties:
Judge, ... (1985): The Theory and Practise of Econometrics
BigJudge p. 237ff:
If the time series process is a stationary ARMA(p,q), then
minimizing the sum of squares is asymptotically (as T -> inf)
equivalent to the exact Maximum Likelihood Estimator
Because Least Squares conditional on the initial information
does not use all information, in small samples exact MLE can
be better.
Without the normality assumption, the least squares estimator
is still consistent under suitable conditions, however not
efficient
Author: josefpktd
License: BSD
'''
from __future__ import print_function
from statsmodels.compat.python import range
import numpy as np
from scipy import signal, optimize, linalg
def arma_generate_sample(ar, ma, nsample, sigma=1, distrvs=np.random.randn,
burnin=0):
"""
Generate a random sample of an ARMA process
Parameters
----------
ar : array_like, 1d
coefficient for autoregressive lag polynomial, including zero lag
ma : array_like, 1d
coefficient for moving-average lag polynomial, including zero lag
nsample : int
length of simulated time series
sigma : float
standard deviation of noise
distrvs : function, random number generator
function that generates the random numbers, and takes sample size
as argument
default: np.random.randn
TODO: change to size argument
burnin : integer (default: 0)
to reduce the effect of initial conditions, burnin observations at the
beginning of the sample are dropped
Returns
-------
sample : array
sample of ARMA process given by ar, ma of length nsample
Notes
-----
As mentioned above, both the AR and MA components should include the
coefficient on the zero-lag. This is typically 1. Further, due to the
    conventions used in signal processing (in signal.lfilter) vs. the
    conventions used in statistics for ARMA processes, the AR parameters
    should have the opposite sign of what you might expect. See the examples
    below.
Examples
--------
>>> import numpy as np
>>> np.random.seed(12345)
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
>>> ar = np.r_[1, -arparams] # add zero-lag and negate
>>> ma = np.r_[1, maparams] # add zero-lag
>>> y = sm.tsa.arma_generate_sample(ar, ma, 250)
>>> model = sm.tsa.ARMA(y, (2, 2)).fit(trend='nc', disp=0)
>>> model.params
array([ 0.79044189, -0.23140636, 0.70072904, 0.40608028])
"""
#TODO: unify with ArmaProcess method
eta = sigma * distrvs(nsample+burnin)
return signal.lfilter(ma, ar, eta)[burnin:]
def arma_acovf(ar, ma, nobs=10):
'''theoretical autocovariance function of ARMA process
Parameters
----------
ar : array_like, 1d
coefficient for autoregressive lag polynomial, including zero lag
ma : array_like, 1d
coefficient for moving-average lag polynomial, including zero lag
nobs : int
number of terms (lags plus zero lag) to include in returned acovf
Returns
-------
acovf : array
autocovariance of ARMA process given by ar, ma
See Also
--------
arma_acf
acovf
Notes
-----
Tries to do some crude numerical speed improvements for cases
    with high persistence. However, this algorithm is slow if the process is
highly persistent and only a few autocovariances are desired.
'''
#increase length of impulse response for AR closer to 1
#maybe cheap/fast enough to always keep nobs for ir large
if np.abs(np.sum(ar)-1) > 0.9:
nobs_ir = max(1000, 2 * nobs) # no idea right now how large is needed
else:
nobs_ir = max(100, 2 * nobs) # no idea right now
ir = arma_impulse_response(ar, ma, nobs=nobs_ir)
    #better safe than sorry (?), I have no idea about the required precision
#only checked for AR(1)
while ir[-1] > 5*1e-5:
nobs_ir *= 10
ir = arma_impulse_response(ar, ma, nobs=nobs_ir)
#again no idea where the speed break points are:
if nobs_ir > 50000 and nobs < 1001:
acovf = np.array([np.dot(ir[:nobs-t], ir[t:nobs])
for t in range(nobs)])
else:
acovf = np.correlate(ir, ir, 'full')[len(ir)-1:]
return acovf[:nobs]
def arma_acf(ar, ma, nobs=10):
'''theoretical autocorrelation function of an ARMA process
Parameters
----------
ar : array_like, 1d
coefficient for autoregressive lag polynomial, including zero lag
ma : array_like, 1d
coefficient for moving-average lag polynomial, including zero lag
nobs : int
number of terms (lags plus zero lag) to include in returned acf
Returns
-------
acf : array
autocorrelation of ARMA process given by ar, ma
See Also
--------
arma_acovf
acf
acovf
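
    Examples
    --------
    A minimal illustrative call (AR(1) with coefficient 0.8); the exact
    floating-point values are omitted here, but acf[0] is 1 by construction:

    >>> acf = arma_acf([1., -0.8], [1.], nobs=5)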
'''
acovf = arma_acovf(ar, ma, nobs)
return acovf/acovf[0]
def arma_pacf(ar, ma, nobs=10):
'''partial autocorrelation function of an ARMA process
Parameters
----------
ar : array_like, 1d
coefficient for autoregressive lag polynomial, including zero lag
ma : array_like, 1d
coefficient for moving-average lag polynomial, including zero lag
nobs : int
number of terms (lags plus zero lag) to include in returned pacf
Returns
-------
pacf : array
partial autocorrelation of ARMA process given by ar, ma
Notes
-----
solves yule-walker equation for each lag order up to nobs lags
not tested/checked yet
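
    Examples
    --------
    A minimal illustrative call (AR(1) with coefficient 0.8); pacf[0] is 1
    by construction:

    >>> pacf = arma_pacf([1., -0.8], [1.], nobs=5)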
'''
apacf = np.zeros(nobs)
acov = arma_acf(ar, ma, nobs=nobs+1)
apacf[0] = 1.
for k in range(2, nobs+1):
r = acov[:k]
apacf[k-1] = linalg.solve(linalg.toeplitz(r[:-1]), r[1:])[-1]
return apacf
def arma_periodogram(ar, ma, worN=None, whole=0):
'''periodogram for ARMA process given by lag-polynomials ar and ma
Parameters
----------
ar : array_like
autoregressive lag-polynomial with leading 1 and lhs sign
ma : array_like
moving average lag-polynomial with leading 1
worN : {None, int}, optional
option for scipy.signal.freqz (read "w or N")
If None, then compute at 512 frequencies around the unit circle.
        If a single integer, then compute at that many frequencies.
Otherwise, compute the response at frequencies given in worN
whole : {0,1}, optional
options for scipy.signal.freqz
        Normally, frequencies are computed from 0 to pi (upper half of the
        unit circle). If whole is non-zero, compute frequencies from 0 to 2*pi.
Returns
-------
w : array
frequencies
sd : array
periodogram, spectral density
Notes
-----
Normalization ?
This uses signal.freqz, which does not use fft. There is a fft version
somewhere.
'''
w, h = signal.freqz(ma, ar, worN=worN, whole=whole)
sd = np.abs(h)**2/np.sqrt(2*np.pi)
if np.sum(np.isnan(h)) > 0:
# this happens with unit root or seasonal unit root'
print('Warning: nan in frequency response h, maybe a unit root')
return w, sd
def arma_impulse_response(ar, ma, nobs=100):
'''get the impulse response function (MA representation) for ARMA process
Parameters
----------
ma : array_like, 1d
moving average lag polynomial
ar : array_like, 1d
auto regressive lag polynomial
nobs : int
number of observations to calculate
Returns
-------
ir : array, 1d
impulse response function with nobs elements
Notes
-----
This is the same as finding the MA representation of an ARMA(p,q).
By reversing the role of ar and ma in the function arguments, the
returned result is the AR representation of an ARMA(p,q), i.e
ma_representation = arma_impulse_response(ar, ma, nobs=100)
ar_representation = arma_impulse_response(ma, ar, nobs=100)
fully tested against matlab
Examples
--------
AR(1)
>>> arma_impulse_response([1.0, -0.8], [1.], nobs=10)
array([ 1. , 0.8 , 0.64 , 0.512 , 0.4096 ,
0.32768 , 0.262144 , 0.2097152 , 0.16777216, 0.13421773])
this is the same as
>>> 0.8**np.arange(10)
array([ 1. , 0.8 , 0.64 , 0.512 , 0.4096 ,
0.32768 , 0.262144 , 0.2097152 , 0.16777216, 0.13421773])
MA(2)
>>> arma_impulse_response([1.0], [1., 0.5, 0.2], nobs=10)
array([ 1. , 0.5, 0.2, 0. , 0. , 0. , 0. , 0. , 0. , 0. ])
ARMA(1,2)
>>> arma_impulse_response([1.0, -0.8], [1., 0.5, 0.2], nobs=10)
array([ 1. , 1.3 , 1.24 , 0.992 , 0.7936 ,
0.63488 , 0.507904 , 0.4063232 , 0.32505856, 0.26004685])
'''
impulse = np.zeros(nobs)
impulse[0] = 1.
return signal.lfilter(ma, ar, impulse)
#alias, easier to remember
arma2ma = arma_impulse_response
#alias, easier to remember
def arma2ar(ar, ma, nobs=100):
'''get the AR representation of an ARMA process
Parameters
----------
ar : array_like, 1d
auto regressive lag polynomial
ma : array_like, 1d
moving average lag polynomial
nobs : int
number of observations to calculate
Returns
-------
ar : array, 1d
coefficients of AR lag polynomial with nobs elements
Notes
-----
This is just an alias for
``ar_representation = arma_impulse_response(ma, ar, nobs=100)``
fully tested against matlab
Examples
--------
'''
return arma_impulse_response(ma, ar, nobs=nobs)
#moved from sandbox.tsa.try_fi
def ar2arma(ar_des, p, q, n=20, mse='ar', start=None):
'''find arma approximation to ar process
This finds the ARMA(p,q) coefficients that minimize the integrated
squared difference between the impulse_response functions
(MA representation) of the AR and the ARMA process. This does
currently not check whether the MA lagpolynomial of the ARMA
process is invertible, neither does it check the roots of the AR
lagpolynomial.
Parameters
----------
ar_des : array_like
coefficients of original AR lag polynomial, including lag zero
p, q : int
length of desired ARMA lag polynomials
n : int
        number of terms of the impulse_response function to include in the
objective function for the approximation
mse : string, 'ar'
not used yet,
Returns
-------
ar_app, ma_app : arrays
coefficients of the AR and MA lag polynomials of the approximation
res : tuple
result of optimize.leastsq
Notes
-----
Extension is possible if we want to match autocovariance instead
of impulse response function.
TODO: convert MA lag polynomial, ma_app, to be invertible, by mirroring
    roots inside the unit circle to ones that are outside. How do we do
this?
'''
#p,q = pq
def msear_err(arma, ar_des):
ar, ma = np.r_[1, arma[:p-1]], np.r_[1, arma[p-1:]]
ar_approx = arma_impulse_response(ma, ar, n)
## print(ar,ma)
## print(ar_des.shape, ar_approx.shape)
## print(ar_des)
## print(ar_approx)
return (ar_des - ar_approx) # ((ar - ar_approx)**2).sum()
if start is None:
arma0 = np.r_[-0.9 * np.ones(p-1), np.zeros(q-1)]
else:
arma0 = start
res = optimize.leastsq(msear_err, arma0, ar_des, maxfev=5000)
#print(res)
arma_app = np.atleast_1d(res[0])
    ar_app = np.r_[1, arma_app[:p-1]]
ma_app = np.r_[1, arma_app[p-1:]]
return ar_app, ma_app, res
def lpol2index(ar):
'''remove zeros from lagpolynomial, squeezed representation with index
Parameters
----------
ar : array_like
coefficients of lag polynomial
Returns
-------
coeffs : array
non-zero coefficients of lag polynomial
index : array
index (lags) of lagpolynomial with non-zero elements
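
    Examples
    --------
    A small sketch: zeros inside the lag polynomial are dropped.

    >>> coeffs, index = lpol2index([1.0, -0.8, 0.0, 0.2])
    >>> coeffs.tolist()
    [1.0, -0.8, 0.2]
    >>> index.tolist()
    [0, 1, 3]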
'''
ar = np.asarray(ar)
index = np.nonzero(ar)[0]
coeffs = ar[index]
return coeffs, index
def index2lpol(coeffs, index):
'''expand coefficients to lag poly
Parameters
----------
coeffs : array
non-zero coefficients of lag polynomial
index : array
index (lags) of lagpolynomial with non-zero elements
Returns
-------
ar : array_like
coefficients of lag polynomial
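
    Examples
    --------
    Round-trips with ``lpol2index`` (small sketch):

    >>> index2lpol([1.0, -0.8, 0.2], [0, 1, 3]).tolist()
    [1.0, -0.8, 0.0, 0.2]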
'''
n = max(index)
    ar = np.zeros(n + 1)
ar[index] = coeffs
return ar
#moved from sandbox.tsa.try_fi
def lpol_fima(d, n=20):
'''MA representation of fractional integration
.. math:: (1-L)^{-d} for |d|<0.5 or |d|<1 (?)
Parameters
----------
d : float
fractional power
n : int
number of terms to calculate, including lag zero
Returns
-------
ma : array
coefficients of lag polynomial
'''
#hide import inside function until we use this heavily
from scipy.special import gammaln
j = np.arange(n)
return np.exp(gammaln(d+j) - gammaln(j+1) - gammaln(d))
#moved from sandbox.tsa.try_fi
def lpol_fiar(d, n=20):
'''AR representation of fractional integration
.. math:: (1-L)^{d} for |d|<0.5 or |d|<1 (?)
Parameters
----------
d : float
fractional power
n : int
number of terms to calculate, including lag zero
Returns
-------
ar : array
coefficients of lag polynomial
    Notes
    -----
    first coefficient is 1, negative signs except for the first term,
    ar(L)*x_t
'''
#hide import inside function until we use this heavily
from scipy.special import gammaln
j = np.arange(n)
ar = - np.exp(gammaln(-d+j) - gammaln(j+1) - gammaln(-d))
ar[0] = 1
return ar
#moved from sandbox.tsa.try_fi
def lpol_sdiff(s):
'''return coefficients for seasonal difference (1-L^s)
just a trivial convenience function
Parameters
----------
s : int
number of periods in season
Returns
-------
sdiff : list, length s+1
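
    Examples
    --------
    >>> lpol_sdiff(4)
    [1, 0, 0, 0, -1]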
'''
return [1] + [0]*(s-1) + [-1]
def deconvolve(num, den, n=None):
"""Deconvolves divisor out of signal, division of polynomials for n terms
calculates den^{-1} * num
Parameters
----------
num : array_like
signal or lag polynomial
denom : array_like
coefficients of lag polynomial (linear filter)
n : None or int
number of terms of quotient
Returns
-------
quot : array
quotient or filtered series
rem : array
remainder
Notes
-----
If num is a time series, then this applies the linear filter den^{-1}.
If both num and den are both lagpolynomials, then this calculates the
quotient polynomial for n terms and also returns the remainder.
This is copied from scipy.signal.signaltools and added n as optional
parameter.
"""
num = np.atleast_1d(num)
den = np.atleast_1d(den)
N = len(num)
D = len(den)
if D > N and n is None:
quot = []
rem = num
else:
if n is None:
n = N-D+1
input = np.zeros(n, float)
input[0] = 1
quot = signal.lfilter(num, den, input)
num_approx = signal.convolve(den, quot, mode='full')
if len(num) < len(num_approx): # 1d only ?
num = np.concatenate((num, np.zeros(len(num_approx)-len(num))))
rem = num - num_approx
return quot, rem
class ArmaProcess(object):
"""
Represent an ARMA process for given lag-polynomials
This is a class to bring together properties of the process.
It does not do any estimation or statistical analysis.
Parameters
----------
ar : array_like, 1d
Coefficient for autoregressive lag polynomial, including zero lag.
See the notes for some information about the sign.
ma : array_like, 1d
Coefficient for moving-average lag polynomial, including zero lag
nobs : int, optional
Length of simulated time series. Used, for example, if a sample is
generated. See example.
Notes
-----
As mentioned above, both the AR and MA components should include the
coefficient on the zero-lag. This is typically 1. Further, due to the
    conventions used in signal processing (in signal.lfilter) vs. the
    conventions used in statistics for ARMA processes, the AR parameters
    should have the opposite sign of what you might expect. See the examples
    below.
Examples
--------
>>> import numpy as np
>>> np.random.seed(12345)
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
    >>> ar = np.r_[1, -arparams] # add zero-lag and negate
    >>> ma = np.r_[1, maparams] # add zero-lag
>>> arma_process = sm.tsa.ArmaProcess(ar, ma)
>>> arma_process.isstationary
True
>>> arma_process.isinvertible
True
>>> y = arma_process.generate_sample(250)
>>> model = sm.tsa.ARMA(y, (2, 2)).fit(trend='nc', disp=0)
>>> model.params
array([ 0.79044189, -0.23140636, 0.70072904, 0.40608028])
"""
# maybe needs special handling for unit roots
def __init__(self, ar, ma, nobs=100):
self.ar = np.asarray(ar)
self.ma = np.asarray(ma)
self.arcoefs = -self.ar[1:]
self.macoefs = self.ma[1:]
self.arpoly = np.polynomial.Polynomial(self.ar)
self.mapoly = np.polynomial.Polynomial(self.ma)
self.nobs = nobs
@classmethod
def from_coeffs(cls, arcoefs, macoefs, nobs=100):
"""
Create ArmaProcess instance from coefficients of the lag-polynomials
Parameters
----------
arcoefs : array-like
Coefficient for autoregressive lag polynomial, not including zero
lag. The sign is inverted to conform to the usual time series
representation of an ARMA process in statistics. See the class
docstring for more information.
macoefs : array-like
Coefficient for moving-average lag polynomial, including zero lag
nobs : int, optional
Length of simulated time series. Used, for example, if a sample
is generated.
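
        Examples
        --------
        A small sketch using the coefficients from the class docstring
        (numpy arrays are used so the coefficients can be negated):

        >>> import numpy as np
        >>> arma = ArmaProcess.from_coeffs(np.array([.75, -.25]),
        ...                                np.array([.65, .35]))
        >>> arma.ar.tolist()
        [1.0, -0.75, 0.25]
        >>> arma.ma.tolist()
        [1.0, 0.65, 0.35]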
"""
return cls(np.r_[1, -arcoefs], np.r_[1, macoefs], nobs=nobs)
@classmethod
def from_estimation(cls, model_results, nobs=None):
"""
Create ArmaProcess instance from ARMA estimation results
Parameters
----------
model_results : ARMAResults instance
A fitted model
nobs : int, optional
If None, nobs is taken from the results
"""
arcoefs = model_results.arparams
macoefs = model_results.maparams
nobs = nobs or model_results.nobs
return cls(np.r_[1, -arcoefs], np.r_[1, macoefs], nobs=nobs)
def __mul__(self, oth):
if isinstance(oth, self.__class__):
ar = (self.arpoly * oth.arpoly).coef
ma = (self.mapoly * oth.mapoly).coef
else:
try:
aroth, maoth = oth
arpolyoth = np.polynomial.Polynomial(aroth)
mapolyoth = np.polynomial.Polynomial(maoth)
ar = (self.arpoly * arpolyoth).coef
ma = (self.mapoly * mapolyoth).coef
except:
print('other is not a valid type')
raise
return self.__class__(ar, ma, nobs=self.nobs)
def __repr__(self):
return 'ArmaProcess(%r, %r, nobs=%d)' % (self.ar.tolist(),
self.ma.tolist(),
self.nobs)
def __str__(self):
return 'ArmaProcess\nAR: %r\nMA: %r' % (self.ar.tolist(),
self.ma.tolist())
def acovf(self, nobs=None):
nobs = nobs or self.nobs
return arma_acovf(self.ar, self.ma, nobs=nobs)
acovf.__doc__ = arma_acovf.__doc__
def acf(self, nobs=None):
nobs = nobs or self.nobs
return arma_acf(self.ar, self.ma, nobs=nobs)
acf.__doc__ = arma_acf.__doc__
def pacf(self, nobs=None):
nobs = nobs or self.nobs
return arma_pacf(self.ar, self.ma, nobs=nobs)
pacf.__doc__ = arma_pacf.__doc__
def periodogram(self, nobs=None):
nobs = nobs or self.nobs
return arma_periodogram(self.ar, self.ma, worN=nobs)
periodogram.__doc__ = arma_periodogram.__doc__
def impulse_response(self, nobs=None):
nobs = nobs or self.nobs
        return arma_impulse_response(self.ar, self.ma, nobs=nobs)
impulse_response.__doc__ = arma_impulse_response.__doc__
def arma2ma(self, nobs=None):
nobs = nobs or self.nobs
return arma2ma(self.ar, self.ma, nobs=nobs)
arma2ma.__doc__ = arma2ma.__doc__
def arma2ar(self, nobs=None):
nobs = nobs or self.nobs
return arma2ar(self.ar, self.ma, nobs=nobs)
arma2ar.__doc__ = arma2ar.__doc__
@property
def arroots(self):
"""
Roots of autoregressive lag-polynomial
"""
return self.arpoly.roots()
@property
def maroots(self):
"""
Roots of moving average lag-polynomial
"""
return self.mapoly.roots()
@property
def isstationary(self):
'''Arma process is stationary if AR roots are outside unit circle
Returns
-------
isstationary : boolean
True if autoregressive roots are outside unit circle
'''
if np.all(np.abs(self.arroots) > 1):
return True
else:
return False
@property
def isinvertible(self):
'''Arma process is invertible if MA roots are outside unit circle
Returns
-------
isinvertible : boolean
True if moving average roots are outside unit circle
'''
if np.all(np.abs(self.maroots) > 1):
return True
else:
return False
def invertroots(self, retnew=False):
'''make MA polynomial invertible by inverting roots inside unit circle
Parameters
----------
retnew : boolean
If False (default), then return the lag-polynomial as array.
If True, then return a new instance with invertible MA-polynomial
Returns
-------
manew : array
new invertible MA lag-polynomial, returned if retnew is false.
wasinvertible : boolean
True if the MA lag-polynomial was already invertible, returned if
retnew is false.
armaprocess : new instance of class
If retnew is true, then return a new instance with invertible
MA-polynomial
'''
#TODO: variable returns like this?
        pr = self.maroots
insideroots = np.abs(pr) < 1
if insideroots.any():
pr[np.abs(pr) < 1] = 1./pr[np.abs(pr) < 1]
pnew = np.polynomial.Polynomial.fromroots(pr)
mainv = pnew.coef/pnew.coef[0]
wasinvertible = False
else:
mainv = self.ma
wasinvertible = True
if retnew:
return self.__class__(self.ar, mainv, nobs=self.nobs)
else:
return mainv, wasinvertible
def generate_sample(self, nsample=100, scale=1., distrvs=None, axis=0,
burnin=0):
'''generate ARMA samples
Parameters
----------
nsample : int or tuple of ints
If nsample is an integer, then this creates a 1d timeseries of
length size. If nsample is a tuple, then the timeseries is along
axis. All other axis have independent arma samples.
scale : float
standard deviation of noise
distrvs : function, random number generator
function that generates the random numbers, and takes sample size
as argument
default: np.random.randn
TODO: change to size argument
burnin : integer (default: 0)
to reduce the effect of initial conditions, burnin observations
at the beginning of the sample are dropped
axis : int
See nsample.
Returns
-------
rvs : ndarray
random sample(s) of arma process
Notes
-----
Should work for n-dimensional with time series along axis, but not
tested yet. Processes are sampled independently.
'''
if distrvs is None:
distrvs = np.random.normal
if np.ndim(nsample) == 0:
nsample = [nsample]
if burnin:
#handle burin time for nd arrays
#maybe there is a better trick in scipy.fft code
newsize = list(nsample)
newsize[axis] += burnin
newsize = tuple(newsize)
fslice = [slice(None)]*len(newsize)
fslice[axis] = slice(burnin, None, None)
fslice = tuple(fslice)
else:
newsize = tuple(nsample)
fslice = tuple([slice(None)]*np.ndim(newsize))
eta = scale * distrvs(size=newsize)
return signal.lfilter(self.ma, self.ar, eta, axis=axis)[fslice]
__all__ = ['arma_acf', 'arma_acovf', 'arma_generate_sample',
'arma_impulse_response', 'arma2ar', 'arma2ma', 'deconvolve',
'lpol2index', 'index2lpol']
if __name__ == '__main__':
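    # NOTE (added comment): the demo below references estimation classes
    # (ARIMAProcess, ARIMA) and methods (errfn, forecast) that are neither
    # defined nor imported in this module, so it will not run as-is; it
    # appears to be left over from an older sandbox version and is kept
    # for reference only.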
# Simulate AR(1)
#--------------
# ar * y = ma * eta
ar = [1, -0.8]
ma = [1.0]
# generate AR data
eta = 0.1 * np.random.randn(1000)
yar1 = signal.lfilter(ar, ma, eta)
print("\nExample 0")
arest = ARIMAProcess(yar1)
rhohat, cov_x, infodict, mesg, ier = arest.fit((1,0,1))
print(rhohat)
print(cov_x)
print("\nExample 1")
ar = [1.0, -0.8]
ma = [1.0, 0.5]
y1 = arest.generate_sample(ar,ma,1000,0.1)
arest = ARIMAProcess(y1)
rhohat1, cov_x1, infodict, mesg, ier = arest.fit((1,0,1))
print(rhohat1)
print(cov_x1)
err1 = arest.errfn(x=y1)
print(np.var(err1))
import statsmodels.api as sm
print(sm.regression.yule_walker(y1, order=2, inv=True))
print("\nExample 2")
nsample = 1000
ar = [1.0, -0.6, -0.1]
ma = [1.0, 0.3, 0.2]
y2 = ARIMA.generate_sample(ar,ma,nsample,0.1)
arest2 = ARIMAProcess(y2)
rhohat2, cov_x2, infodict, mesg, ier = arest2.fit((1,0,2))
print(rhohat2)
print(cov_x2)
err2 = arest.errfn(x=y2)
print(np.var(err2))
print(arest2.rhoy)
print(arest2.rhoe)
print("true")
print(ar)
print(ma)
rhohat2a, cov_x2a, infodict, mesg, ier = arest2.fit((2,0,2))
print(rhohat2a)
print(cov_x2a)
err2a = arest.errfn(x=y2)
print(np.var(err2a))
print(arest2.rhoy)
print(arest2.rhoe)
print("true")
print(ar)
print(ma)
print(sm.regression.yule_walker(y2, order=2, inv=True))
print("\nExample 20")
nsample = 1000
ar = [1.0]#, -0.8, -0.4]
ma = [1.0, 0.5, 0.2]
y3 = ARIMA.generate_sample(ar,ma,nsample,0.01)
arest20 = ARIMAProcess(y3)
rhohat3, cov_x3, infodict, mesg, ier = arest20.fit((2,0,0))
print(rhohat3)
print(cov_x3)
err3 = arest20.errfn(x=y3)
print(np.var(err3))
print(np.sqrt(np.dot(err3,err3)/nsample))
print(arest20.rhoy)
print(arest20.rhoe)
print("true")
print(ar)
print(ma)
rhohat3a, cov_x3a, infodict, mesg, ier = arest20.fit((0,0,2))
print(rhohat3a)
print(cov_x3a)
err3a = arest20.errfn(x=y3)
print(np.var(err3a))
print(np.sqrt(np.dot(err3a,err3a)/nsample))
print(arest20.rhoy)
print(arest20.rhoe)
print("true")
print(ar)
print(ma)
print(sm.regression.yule_walker(y3, order=2, inv=True))
print("\nExample 02")
nsample = 1000
ar = [1.0, -0.8, 0.4] #-0.8, -0.4]
ma = [1.0]#, 0.8, 0.4]
y4 = ARIMA.generate_sample(ar,ma,nsample)
arest02 = ARIMAProcess(y4)
rhohat4, cov_x4, infodict, mesg, ier = arest02.fit((2,0,0))
print(rhohat4)
print(cov_x4)
err4 = arest02.errfn(x=y4)
print(np.var(err4))
sige = np.sqrt(np.dot(err4,err4)/nsample)
print(sige)
print(sige * np.sqrt(np.diag(cov_x4)))
print(np.sqrt(np.diag(cov_x4)))
print(arest02.rhoy)
print(arest02.rhoe)
print("true")
print(ar)
print(ma)
rhohat4a, cov_x4a, infodict, mesg, ier = arest02.fit((0,0,2))
print(rhohat4a)
print(cov_x4a)
err4a = arest02.errfn(x=y4)
print(np.var(err4a))
sige = np.sqrt(np.dot(err4a,err4a)/nsample)
print(sige)
print(sige * np.sqrt(np.diag(cov_x4a)))
print(np.sqrt(np.diag(cov_x4a)))
print(arest02.rhoy)
print(arest02.rhoe)
print("true")
print(ar)
print(ma)
import statsmodels.api as sm
print(sm.regression.yule_walker(y4, order=2, method='mle', inv=True))
import matplotlib.pyplot as plt
plt.plot(arest2.forecast()[-100:])
#plt.show()
ar1, ar2 = ([1, -0.4], [1, 0.5])
ar2 = [1, -1]
lagpolyproduct = np.convolve(ar1, ar2)
print(deconvolve(lagpolyproduct, ar2, n=None))
print(signal.deconvolve(lagpolyproduct, ar2))
print(deconvolve(lagpolyproduct, ar2, n=10))
| bsd-3-clause |
scollis/SHARPpy | sharppy/viz/map.py | 6 | 25946 |
import numpy as np
import sharppy
from PySide import QtGui, QtCore
import sys, os
import re
import urllib2
class Mapper(object):
data_dir = os.path.join(os.path.dirname(sharppy.__file__), 'databases', 'shapefiles')
min_lat = {'npstere':0., 'merc':-30., 'spstere':-90.}
max_lat = {'npstere':90., 'merc':30., 'spstere':0.}
def __init__(self, lambda_0, phi_0, proj='npstere'):
self.proj = proj
self.lambda_0 = lambda_0
self.phi_0 = phi_0
if proj == 'spstere':
self.phi_0 = -np.abs(self.phi_0)
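        # Added comment: m acts as an overall map scale factor in the
        # projection equations below; rad_earth is the Earth's radius in
        # centimetres (6.371e8 cm = 6371 km).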
self.m = 6.6667e-7
self.rad_earth = 6.371e8
self._bnds = {}
def getLambda0(self):
return self.lambda_0
def setLambda0(self, lambda_0):
self.lambda_0 = lambda_0
def getPhi0(self):
return self.phi_0
def setProjection(self, proj):
if proj not in ['npstere', 'spstere', 'merc']:
raise ValueError("Projection must be one of 'npstere', 'spstere', or 'merc'; got '%s'." % proj)
self.proj = proj
if proj == 'spstere':
self.phi_0 = -np.abs(self.phi_0)
elif proj == 'npstere':
self.phi_0 = np.abs(self.phi_0)
def getProjection(self):
return self.proj
def getCoordPaths(self):
path = QtGui.QPainterPath()
lb_lat, ub_lat = Mapper.min_lat[self.proj], Mapper.max_lat[self.proj]
if self.proj == 'npstere':
for lon in xrange(0, 360, 20):
lats = np.linspace(lb_lat, ub_lat, 2)
lx, ly = self(lats, lon)
path.moveTo(lx[0], ly[0])
for x, y in zip(lx, ly)[1:]:
path.lineTo(x, y)
for lat in xrange(int(lb_lat), int(ub_lat), 15):
lons = np.arange(self.getLambda0(), self.getLambda0() + 360, 90)
rx, ry = self(lat, lons)
x_min, x_max = rx.min(), rx.max()
y_min, y_max = ry.min(), ry.max()
path.addEllipse(x_min, y_min, x_max - x_min, y_max - y_min)
elif self.proj == 'merc':
for lon in xrange(-180, 180 + 20, 20):
lats = np.linspace(lb_lat, ub_lat, 2)
lx, ly = self(lats, lon)
path.moveTo(lx[0], ly[0])
for x, y in zip(lx, ly)[1:]:
path.lineTo(x, y)
for lat in xrange(int(lb_lat), int(ub_lat) + 10, 10):
lons = np.linspace(-180, 180, 2)
lx, ly = self(lat, lons)
path.moveTo(lx[0], ly[0])
for x, y in zip(lx, ly)[1:]:
path.lineTo(x, y)
elif self.proj == 'spstere':
for lon in xrange(0, 360, 20):
lats = np.linspace(lb_lat, ub_lat, 2)
lx, ly = self(lats, lon)
path.moveTo(lx[0], ly[0])
for x, y in zip(lx, ly)[1:]:
path.lineTo(x, y)
for lat in xrange(int(ub_lat), int(lb_lat), -15):
lons = np.arange(self.getLambda0(), self.getLambda0() + 360, 90)
rx, ry = self(lat, lons)
x_min, x_max = rx.min(), rx.max()
y_min, y_max = ry.min(), ry.max()
path.addEllipse(x_min, y_min, x_max - x_min, y_max - y_min)
return path
def getLatBounds(self):
return Mapper.min_lat[self.proj], Mapper.max_lat[self.proj]
def __call__(self, coord1, coord2, inverse=False):
if inverse:
if self.proj in ['npstere', 'spstere']:
return self._xytoll_stere(coord1, coord2, self.lambda_0, self.phi_0, self.m, self.rad_earth)
elif self.proj in ['merc']:
return self._xytoll_merc(coord1, coord2, self.lambda_0, self.m, self.rad_earth)
else:
if self.proj in ['npstere', 'spstere']:
return self._lltoxy_stere(coord1, coord2, self.lambda_0, self.phi_0, self.m, self.rad_earth)
elif self.proj in ['merc']:
return self._lltoxy_merc(coord1, coord2, self.lambda_0, self.m, self.rad_earth)
# Functions to perform the map transformation to North Pole Stereographic
# Equations from the SoM OBAN 2014 class
# Functions adapted for either hemisphere and inverse transformations added by Tim Supinie, April 2015
def _get_sigma(self, phi_0, lats, south_hemis=False):
sign = -1 if south_hemis else 1
sigma = (1. + np.sin(np.radians(sign * phi_0))) / (1. + np.sin(np.radians(sign * lats)))
return sigma
def _get_shifted_lon(self, lambda_0, lons, south_hemis=False):
sign = -1 if south_hemis else 1
return sign * (lambda_0 - lons)
def _lltoxy_stere(self, lats, lons, lambda_0, phi_0, m, rad_earth):
sigma = self._get_sigma(phi_0, lats, south_hemis=(phi_0 < 0))
lambdas = np.radians(self._get_shifted_lon(lambda_0 + 90, lons, south_hemis=(phi_0 < 0)))
x = m * sigma * rad_earth * np.cos(np.radians(lats)) * np.cos(lambdas)
y = m * sigma * rad_earth * np.cos(np.radians(lats)) * np.sin(lambdas)
return x, y
def _xytoll_stere(self, xs, ys, lambda_0, phi_0, m, rad_earth):
sign = -1 if (phi_0 < 0) else 1
lon = (lambda_0 + 90 - sign * np.degrees(np.arctan2(ys, xs)))
lat = sign * np.degrees(2 * np.arctan(rad_earth * m * (1 + sign * np.sin(np.radians(phi_0))) / np.hypot(xs, ys)) - np.pi / 2)
if lon < -180: lon += 360
elif lon > 180: lon -= 360
return lat, lon
# Function to perform map transformation to and from Mercator projection
def _lltoxy_merc(self, lats, lons, lambda_0, m, rad_earth):
x = m * rad_earth * (np.radians(lons) - np.radians(lambda_0))
y = -m * rad_earth * np.log(np.tan(np.pi / 4 + np.radians(lats) / 2))
if type(x) in [ np.ndarray ] or type(y) in [ np.ndarray ]:
if type(x) not in [ np.ndarray ]:
x = x * np.ones(y.shape)
if type(y) not in [ np.ndarray ]:
y = y * np.ones(x.shape)
return x, y
def _xytoll_merc(self, xs, ys, lambda_0, m, rad_earth):
lon = np.degrees(np.radians(lambda_0) + xs / (m * rad_earth))
lat = -np.degrees(2 * np.arctan(np.exp(ys / (m * rad_earth))) - np.pi / 2)
return lat, lon
def _loadDat(self, name, res):
"""
Code shamelessly lifted from Basemap's data file parser by Jeff Whitaker.
http://matplotlib.org/basemap/
"""
def segmentPath(b, lb_lat, ub_lat):
paths = []
if b[:, 1].max() <= lb_lat or b[:, 1].min() >= ub_lat:
return paths
idxs = np.where((b[:, 1] >= lb_lat) & (b[:, 1] <= ub_lat))[0]
if len(idxs) < 2:
return paths
segs = (np.diff(idxs) == 1)
try:
breaks = np.where(segs == 0)[0] + 1
except IndexError:
breaks = []
breaks = [ 0 ] + list(breaks) + [ -1 ]
for idx in xrange(len(breaks) - 1):
if breaks[idx + 1] == -1:
seg_idxs = idxs[breaks[idx]:]
else:
seg_idxs = idxs[breaks[idx]:breaks[idx + 1]]
if len(seg_idxs) >= 2:
paths.append(b[seg_idxs, ::-1])
return paths
bdatfile = open(os.path.join(Mapper.data_dir, name + '_' + res + '.dat'), 'rb')
bdatmetafile = open(os.path.join(Mapper.data_dir, name + 'meta_' + res + '.dat'), 'r')
projs = ['npstere', 'merc', 'spstere']
paths = dict( (p, []) for p in projs )
# old_proj = self.proj
for line in bdatmetafile:
lats, lons = [], []
linesplit = line.split()
area = float(linesplit[1])
south = float(linesplit[3])
north = float(linesplit[4])
if area < 0:
area = 1e30
if area > 1500.:
typ = int(linesplit[0])
npts = int(linesplit[2])
offsetbytes = int(linesplit[5])
bytecount = int(linesplit[6])
bdatfile.seek(offsetbytes,0)
# read in binary string convert into an npts by 2
# numpy array (first column is lons, second is lats).
polystring = bdatfile.read(bytecount)
# binary data is little endian.
b = np.array(np.fromstring(polystring,dtype='<f4'),'f8')
b.shape = (npts, 2)
if np.any(b[:, 0] > 180):
b[:, 0] -= 360
for proj in projs:
lb_lat, ub_lat = Mapper.min_lat[proj], Mapper.max_lat[proj]
path = segmentPath(b, lb_lat, ub_lat)
paths[proj].extend(path)
return paths
def getBoundary(self, name):
if name == 'coastlines':
name = 'gshhs'
if name == 'states':
res = 'h'
elif name == 'uscounties':
res = 'f'
else:
res = 'i'
if name not in self._bnds:
self._bnds[name] = self._loadDat(name, res)
paths = []
for bnd in self._bnds[name][self.proj]:
path = QtGui.QPainterPath()
path_lats, path_lons = zip(*bnd)
path_x, path_y = self(np.array(path_lats), np.array(path_lons))
path.moveTo(path_x[0], path_y[0])
for px, py in zip(path_x, path_y)[1:]:
path.lineTo(px, py)
paths.append(path)
return paths
class MapWidget(QtGui.QWidget):
clicked = QtCore.Signal(dict)
def __init__(self, data_source, init_time, async, **kwargs):
config = kwargs.get('cfg', None)
del kwargs['cfg']
super(MapWidget, self).__init__(**kwargs)
self.trans_x, self.trans_y = 0., 0.
self.center_x, self.center_y = 0., 0.
self.init_drag_x, self.init_drag_y = None, None
self.dragging = False
self.map_rot = 0.0
self.setMouseTracking(True)
self.has_internet = True
self.init_scale = 0.6
if config is None or not config.has_section('map'):
self.scale = self.init_scale
self.map_center_x, self.map_center_y = 0., 0.
std_lon = -97.5
proj = 'npstere'
init_from_config = False
else:
proj = config.get('map', 'proj')
std_lon = float(config.get('map', 'std_lon'))
self.scale = float(config.get('map', 'scale'))
self.map_center_x = float(config.get('map', 'center_x'))
self.map_center_y = float(config.get('map', 'center_y'))
init_from_config = True
self.mapper = Mapper(std_lon, 60., proj=proj)
self.stn_lats = np.array([])
self.stn_lons = np.array([])
self.stn_ids = []
self.stn_names = []
self.default_width, self.default_height = self.width(), self.height()
self.setMinimumSize(self.width(), self.height())
self.clicked_stn = None
self.stn_readout = QtGui.QLabel(parent=self)
self.stn_readout.setStyleSheet("QLabel { background-color:#000000; border-width: 0px; font-size: 16px; color: #FFFFFF; }")
self.stn_readout.setText("")
self.stn_readout.show()
self.stn_readout.move(self.width(), self.height())
self.load_readout = QtGui.QLabel(parent=self)
self.load_readout.setStyleSheet("QLabel { background-color:#000000; border-width: 0px; font-size: 18px; color: #FFFFFF; }")
self.load_readout.setText("Loading ...")
self.load_readout.setFixedWidth(100)
self.load_readout.show()
self.load_readout.move(self.width(), self.height())
self.latlon_readout = QtGui.QLabel(parent=self)
self.latlon_readout.setStyleSheet("QLabel { background-color:#000000; border-width: 0px; font-size: 18px; color: #FFFFFF; }")
self.latlon_readout.setText("")
self.latlon_readout.setFixedWidth(150)
self.latlon_readout.show()
self.latlon_readout.move(10, 10)
self.no_internet = QtGui.QLabel(parent=self)
self.no_internet.setStyleSheet("QLabel { background-color:#000000; border-width: 0px; font-size: 32px; color: #FFFFFF; }")
self.no_internet.setText("No Internet Connection")
self.no_internet.show()
txt_width = self.no_internet.fontMetrics().width(self.no_internet.text())
self.no_internet.setFixedWidth(txt_width)
self.no_internet.move(self.width(), self.height())
self.async = async
self.setDataSource(data_source, init_time, init=True)
self.setWindowTitle('SHARPpy')
if not init_from_config:
self.resetViewport()
self.saveProjection(config)
self.initMap()
self.initUI()
def initUI(self):
self.center_x, self.center_y = self.width() / 2, self.height() / 2
self.plotBitMap = QtGui.QPixmap(self.width(), self.height())
self.drawMap()
def initMap(self):
self._coast_path = self.mapper.getBoundary('coastlines')
self._country_path = self.mapper.getBoundary('countries')
self._state_path = self.mapper.getBoundary('states')
self._county_path = self.mapper.getBoundary('uscounties')
self._grid_path = self.mapper.getCoordPaths()
def setDataSource(self, data_source, data_time, init=False):
self.cur_source = data_source
self.setCurrentTime(data_time, init=init)
def setCurrentTime(self, data_time, init=False):
self.current_time = data_time
self.clicked_stn = None
self.clicked.emit(None)
self._showLoading()
getPoints = lambda: self.cur_source.getAvailableAtTime(self.current_time)
def update(points):
self.points = points[0]
self.stn_lats = np.array([ p['lat'] for p in self.points ])
self.stn_lons = np.array([ p['lon'] for p in self.points ])
self.stn_ids = [ p['srcid'] for p in self.points ]
self.stn_names = []
for p in self.points:
if p['icao'] != "":
id_str = " (%s)" % p['icao']
else:
id_str = ""
if p['state'] != "":
pol_str = ", %s" % p['state']
elif p['country'] != "":
pol_str = ", %s" % p['country']
else:
pol_str = ""
nm = p['name']
if id_str == "" and pol_str == "":
nm = nm.upper()
name = "%s%s%s" % (nm, pol_str, id_str)
self.stn_names.append(name)
self._hideLoading()
if not init:
self.drawMap()
self.update()
if init:
points = getPoints()
update([ points ])
else:
self.async.post(getPoints, update)
def setProjection(self, proj):
self.mapper.setProjection(proj)
self.resetViewport()
self._showLoading()
def update(args):
self.resetViewport()
self.drawMap()
self._hideLoading()
self.update()
return
self.async.post(self.initMap, update)
def resetViewport(self, ctr_lat=None, ctr_lon=None):
self.map_center_x = self.width() / 2
if ctr_lat is not None and ctr_lon is not None:
center_x, center_y = self.mapper(ctr_lat, ctr_lon)
self.map_center_y = -center_y + self.height() / 2
self.map_center_y = self.center_y - (self.center_y - self.map_center_y) / self.scale
else:
self.scale = self.init_scale
proj = self.mapper.getProjection()
if proj == 'npstere':
self.map_center_y = -13 * self.height() / 10 + self.height() / 2
elif proj == 'merc':
self.map_center_y = self.height() / 2
elif proj == 'spstere':
self.map_center_y = 13 * self.height() / 10 + self.height() / 2
def drawMap(self):
qp = QtGui.QPainter()
qp.begin(self.plotBitMap)
# qp.rotate(self.map_rot)
self.plotBitMap.fill(QtCore.Qt.black)
map_center_x = self.map_center_x + self.trans_x
map_center_y = self.map_center_y + self.trans_y
qp.translate(map_center_x, map_center_y)
qp.scale(1. / self.scale, 1. / self.scale)
self.transform = qp.transform()
window_rect = QtCore.QRect(0, 0, self.width(), self.height())
qp.setPen(QtGui.QPen(QtGui.QColor('#333333'))) #, self.scale, QtCore.Qt.DashLine
qp.drawPath(self._grid_path)
# Modify the scale thresholds according to the ratio of the area of the plot to the default area
default_area = self.default_width * self.default_height
actual_area = self.width() * self.height()
scaled_area = np.sqrt(default_area / float(actual_area))
if self.scale < 0.15 * scaled_area:
max_comp = 102
full_scale = 0.10 * scaled_area
zero_scale = 0.15 * scaled_area
comp = max_comp * min(max((zero_scale - self.scale) / (zero_scale - full_scale), 0), 1)
color = '#' + ("{0:02x}".format(int(round(comp)))) * 3
qp.setPen(QtGui.QPen(QtGui.QColor(color)))
for cp in self._county_path:
if self.transform.mapRect(cp.boundingRect()).intersects(window_rect):
qp.drawPath(cp)
qp.setPen(QtGui.QPen(QtGui.QColor('#999999')))
for sp in self._state_path:
if self.transform.mapRect(sp.boundingRect()).intersects(window_rect):
qp.drawPath(sp)
qp.setPen(QtGui.QPen(QtCore.Qt.white))
for cp in self._coast_path:
if self.transform.mapRect(cp.boundingRect()).intersects(window_rect):
qp.drawPath(cp)
for cp in self._country_path:
if self.transform.mapRect(cp.boundingRect()).intersects(window_rect):
qp.drawPath(cp)
self.drawStations(qp)
qp.end()
def drawStations(self, qp):
stn_xs, stn_ys = self.mapper(self.stn_lats, self.stn_lons)
lb_lat, ub_lat = self.mapper.getLatBounds()
size = 3 * self.scale
unselected_color = QtCore.Qt.red
selected_color = QtCore.Qt.green
window_rect = QtCore.QRect(0, 0, self.width(), self.height())
clicked_x, clicked_y, clicked_lat, clicked_id = None, None, None, None
color = unselected_color
for stn_x, stn_y, stn_lat, stn_id in zip(stn_xs, stn_ys, self.stn_lats, self.stn_ids):
if self.clicked_stn == stn_id:
clicked_x = stn_x
clicked_y = stn_y
clicked_lat = stn_lat
clicked_id = stn_id
else:
if lb_lat <= stn_lat and stn_lat <= ub_lat and window_rect.contains(*self.transform.map(stn_x, stn_y)):
qp.setPen(QtGui.QPen(color))
qp.setBrush(QtGui.QBrush(color))
qp.drawEllipse(QtCore.QPointF(stn_x, stn_y), size, size)
color = selected_color
if lb_lat <= clicked_lat and clicked_lat <= ub_lat and window_rect.contains(*self.transform.map(clicked_x, clicked_y)):
qp.setPen(QtGui.QPen(color))
qp.setBrush(QtGui.QBrush(color))
qp.drawEllipse(QtCore.QPointF(clicked_x, clicked_y), size, size)
def paintEvent(self, e):
qp = QtGui.QPainter()
qp.begin(self)
qp.drawPixmap(0, 0, self.plotBitMap)
qp.end()
def resizeEvent(self, e):
old_size = e.oldSize()
new_size = e.size()
if old_size.width() == -1 and old_size.height() == -1:
old_size = self.size()
self.map_center_x += (new_size.width() - old_size.width()) / 2.
self.map_center_y += (new_size.height() - old_size.height()) / 2.
self._hideLoading()
self.hasInternet(self.has_internet)
self.initUI()
def mousePressEvent(self, e):
self.init_drag_x, self.init_drag_y = e.x(), e.y()
def mouseMoveEvent(self, e):
if self.init_drag_x is not None and self.init_drag_y is not None:
self.dragging = True
self.trans_x = e.x() - self.init_drag_x
self.trans_y = e.y() - self.init_drag_y
self.drawMap()
self.update()
self._checkStations(e)
trans_inv, is_invertible = self.transform.inverted()
mouse_x, mouse_y = trans_inv.map(e.x(), e.y())
lat, lon = self.mapper(mouse_x, mouse_y, inverse=True)
self.latlon_readout.setText("%.3f; %.3f" % (lat, lon))
def mouseReleaseEvent(self, e):
self.init_drag_x, self.init_drag_y = None, None
self.map_center_x += self.trans_x
self.map_center_y += self.trans_y
self.trans_x, self.trans_y = 0, 0
if not self.dragging and len(self.stn_lats) > 0:
stn_xs, stn_ys = self.mapper(self.stn_lats, self.stn_lons)
stn_xs, stn_ys = zip(*[ self.transform.map(sx, sy) for sx, sy in zip(stn_xs, stn_ys) ])
stn_xs = np.array(stn_xs)
stn_ys = np.array(stn_ys)
dists = np.hypot(stn_xs - e.x(), stn_ys - e.y())
stn_idx = np.argmin(dists)
if dists[stn_idx] <= 5:
self.clicked_stn = self.stn_ids[stn_idx]
self.clicked.emit(self.points[stn_idx])
self.drawMap()
self.update()
self.dragging = False
def mouseDoubleClickEvent(self, e):
trans_inv, is_invertible = self.transform.inverted()
mouse_x, mouse_y = trans_inv.map(e.x(), e.y())
lat, lon = self.mapper(mouse_x, mouse_y, inverse=True)
self.mapper.setLambda0(lon)
self._showLoading()
def update(args):
self.resetViewport(ctr_lat=lat, ctr_lon=lon)
self.drawMap()
self._hideLoading()
self.update()
return
self.async.post(self.initMap, update)
def wheelEvent(self, e):
max_speed = 75
delta = max(min(-e.delta(), max_speed), -max_speed)
scale_fac = 10 ** (delta / 1000.)
scaled_size = float(min(self.default_width, self.default_height)) / min(self.width(), self.height())
if self.scale * scale_fac > 2.5 * scaled_size:
scale_fac = 2.5 * scaled_size / self.scale
self.scale *= scale_fac
self.map_center_x = self.center_x - (self.center_x - self.map_center_x) / scale_fac
self.map_center_y = self.center_y - (self.center_y - self.map_center_y) / scale_fac
self.drawMap()
self._checkStations(e)
self.update()
def saveProjection(self, config):
map_center_x = self.map_center_x + (self.default_width - self.width() ) / 2.
map_center_y = self.map_center_y + (self.default_height - self.height()) / 2.
if not config.has_section('map'):
config.add_section('map')
config.set('map', 'proj', self.mapper.getProjection())
config.set('map', 'std_lon', self.mapper.getLambda0())
config.set('map', 'scale', self.scale)
config.set('map', 'center_x', map_center_x)
config.set('map', 'center_y', map_center_y)
def hasInternet(self, has_connection):
self.has_internet = has_connection
if has_connection:
self.no_internet.move(self.width(), self.height())
else:
met = self.no_internet.fontMetrics()
txt_width = met.width(self.no_internet.text())
txt_height = met.height()
self.no_internet.move((self.width() - txt_width) / 2, (self.height() - txt_height) / 2)
def _showLoading(self):
self.load_readout.move(10, self.height() - 25)
def _hideLoading(self):
self.load_readout.move(self.width(), self.height())
def _checkStations(self, e):
stn_xs, stn_ys = self.mapper(self.stn_lats, self.stn_lons)
if len(stn_xs) == 0 or len(stn_ys) == 0:
return
stn_xs, stn_ys = zip(*[ self.transform.map(sx, sy) for sx, sy in zip(stn_xs, stn_ys) ])
stn_xs = np.array(stn_xs)
stn_ys = np.array(stn_ys)
dists = np.hypot(stn_xs - e.x(), stn_ys - e.y())
stn_idx = np.argmin(dists)
if dists[stn_idx] <= 5:
stn_x, stn_y = stn_xs[stn_idx], stn_ys[stn_idx]
fm = QtGui.QFontMetrics(QtGui.QFont(self.font().rawName(), 16))
label_offset = 5
align = 0
if stn_x > self.width() / 2:
sgn_x = -1
label_x = stn_x - fm.width(self.stn_names[stn_idx])
align |= QtCore.Qt.AlignRight
else:
sgn_x = 1
label_x = stn_x
align |= QtCore.Qt.AlignLeft
if stn_y > self.height() / 2:
sgn_y = -1
label_y = stn_y - fm.height()
align |= QtCore.Qt.AlignBottom
else:
sgn_y = 1
label_y = stn_y
align |= QtCore.Qt.AlignTop
self.stn_readout.setText(self.stn_names[stn_idx])
self.stn_readout.move(label_x + sgn_x * label_offset, label_y + sgn_y * label_offset)
self.stn_readout.setFixedWidth(fm.width(self.stn_names[stn_idx]))
self.stn_readout.setAlignment(align)
self.setCursor(QtCore.Qt.PointingHandCursor)
else:
self.stn_readout.setText("")
self.stn_readout.setFixedWidth(0)
self.stn_readout.move(self.width(), self.height())
self.stn_readout.setAlignment(QtCore.Qt.AlignLeft)
self.unsetCursor()
| bsd-3-clause |
UpSea/midProjects | 01_aat-ebook-full-source-code-20160430/chapter4/bayes-binomial-mcmc.py | 1 | 1675 | import matplotlib.pyplot as plt
import numpy as np
import pymc3
import scipy.stats as stats
plt.style.use("ggplot")
# Parameter values for prior and analytic posterior
n = 50
z = 10
alpha = 12
beta = 12
alpha_post = 22
beta_post = 52
# How many samples to carry out for MCMC
iterations = 100000
# Use PyMC3 to construct a model context
basic_model = pymc3.Model()
with basic_model:
# Define our prior belief about the fairness
# of the coin using a Beta distribution
theta = pymc3.Beta("theta", alpha=alpha, beta=beta)
    # Define the Binomial likelihood function (z successes in n trials)
y = pymc3.Binomial("y", n=n, p=theta, observed=z)
# Carry out the MCMC analysis using the Metropolis algorithm
# Use Maximum A Posteriori (MAP) optimisation as initial value for MCMC
start = pymc3.find_MAP()
# Use the Metropolis algorithm (as opposed to NUTS or HMC, etc.)
step = pymc3.Metropolis()
# Calculate the trace
trace = pymc3.sample(iterations, step, start, random_seed=1, progressbar=True)
# Plot the posterior histogram from MCMC analysis
bins=50
plt.hist(
trace["theta"], bins,
histtype="step", normed=True,
label="Posterior (MCMC)", color="red"
)
# Plot the analytic prior and posterior beta distributions
x = np.linspace(0, 1, 100)
plt.plot(
x, stats.beta.pdf(x, alpha, beta),
"--", label="Prior", color="blue"
)
plt.plot(
x, stats.beta.pdf(x, alpha_post, beta_post),
label='Posterior (Analytic)', color="green"
)
# Update the graph labels
plt.legend(title="Parameters", loc="best")
plt.xlabel("$\\theta$, Fairness")
plt.ylabel("Density")
plt.show()
# Show the trace plot
pymc3.traceplot(trace)
plt.show()
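# Illustrative check (an editorial addition, not part of the original script):
# the sampled posterior mean should be close to the analytic Beta posterior
# mean alpha_post / (alpha_post + beta_post). Only names defined above
# (`trace`, `alpha_post`, `beta_post`) are reused here.
print("MCMC posterior mean:     %.4f" % np.mean(trace["theta"]))
print("Analytic posterior mean: %.4f" % (alpha_post / float(alpha_post + beta_post)))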
| mit |
depet/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 8 | 2588 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate(10. ** np.arange(1, 4)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%d" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = pl.subplot(3, 2, 2 * i + 1)
l2_plot = pl.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
pl.text(-8, 3, "C = %d" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
pl.show()
| bsd-3-clause |
anntzer/scikit-learn | sklearn/compose/_column_transformer.py | 2 | 33357 | """
The :mod:`sklearn.compose._column_transformer` module implements utilities
to work with heterogeneous data and to apply different transformers to
different columns.
"""
# Author: Andreas Mueller
# Joris Van den Bossche
# License: BSD
from itertools import chain
import numpy as np
from scipy import sparse
from joblib import Parallel
from ..base import clone, TransformerMixin
from ..utils._estimator_html_repr import _VisualBlock
from ..pipeline import _fit_transform_one, _transform_one, _name_estimators
from ..preprocessing import FunctionTransformer
from ..utils import Bunch
from ..utils import _safe_indexing
from ..utils import _get_column_indices
from ..utils.metaestimators import _BaseComposition
from ..utils.validation import check_array, check_is_fitted
from ..utils.validation import _deprecate_positional_args
from ..utils.fixes import delayed
__all__ = [
'ColumnTransformer', 'make_column_transformer', 'make_column_selector'
]
_ERR_MSG_1DCOLUMN = ("1D data passed to a transformer that expects 2D data. "
"Try to specify the column selection as a list of one "
"item instead of a scalar.")
class ColumnTransformer(TransformerMixin, _BaseComposition):
"""Applies transformers to columns of an array or pandas DataFrame.
This estimator allows different columns or column subsets of the input
to be transformed separately and the features generated by each transformer
will be concatenated to form a single feature space.
This is useful for heterogeneous or columnar data, to combine several
feature extraction mechanisms or transformations into a single transformer.
Read more in the :ref:`User Guide <column_transformer>`.
.. versionadded:: 0.20
Parameters
----------
transformers : list of tuples
List of (name, transformer, columns) tuples specifying the
transformer objects to be applied to subsets of the data.
name : str
Like in Pipeline and FeatureUnion, this allows the transformer and
its parameters to be set using ``set_params`` and searched in grid
search.
transformer : {'drop', 'passthrough'} or estimator
Estimator must support :term:`fit` and :term:`transform`.
Special-cased strings 'drop' and 'passthrough' are accepted as
well, to indicate to drop the columns or to pass them through
untransformed, respectively.
columns : str, array-like of str, int, array-like of int, \
array-like of bool, slice or callable
Indexes the data on its second axis. Integers are interpreted as
positional columns, while strings can reference DataFrame columns
by name. A scalar string or int should be used where
``transformer`` expects X to be a 1d array-like (vector),
otherwise a 2d array will be passed to the transformer.
A callable is passed the input data `X` and can return any of the
above. To select multiple columns by name or dtype, you can use
:obj:`make_column_selector`.
remainder : {'drop', 'passthrough'} or estimator, default='drop'
By default, only the specified columns in `transformers` are
transformed and combined in the output, and the non-specified
columns are dropped. (default of ``'drop'``).
By specifying ``remainder='passthrough'``, all remaining columns that
were not specified in `transformers` will be automatically passed
through. This subset of columns is concatenated with the output of
the transformers.
By setting ``remainder`` to be an estimator, the remaining
non-specified columns will use the ``remainder`` estimator. The
estimator must support :term:`fit` and :term:`transform`.
Note that using this feature requires that the DataFrame columns
input at :term:`fit` and :term:`transform` have identical order.
sparse_threshold : float, default=0.3
If the output of the different transformers contains sparse matrices,
these will be stacked as a sparse matrix if the overall density is
lower than this value. Use ``sparse_threshold=0`` to always return
dense. When the transformed output consists of all dense data, the
stacked result will be dense, and this keyword will be ignored.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
transformer_weights : dict, default=None
Multiplicative weights for features per transformer. The output of the
transformer is multiplied by these weights. Keys are transformer names,
values the weights.
verbose : bool, default=False
If True, the time elapsed while fitting each transformer will be
printed as it is completed.
Attributes
----------
transformers_ : list
The collection of fitted transformers as tuples of
(name, fitted_transformer, column). `fitted_transformer` can be an
estimator, 'drop', or 'passthrough'. In case there were no columns
selected, this will be the unfitted transformer.
If there are remaining columns, the final element is a tuple of the
form:
('remainder', transformer, remaining_columns) corresponding to the
``remainder`` parameter. If there are remaining columns, then
``len(transformers_)==len(transformers)+1``, otherwise
``len(transformers_)==len(transformers)``.
named_transformers_ : :class:`~sklearn.utils.Bunch`
Read-only attribute to access any transformer by given name.
Keys are transformer names and values are the fitted transformer
objects.
sparse_output_ : bool
Boolean flag indicating whether the output of ``transform`` is a
sparse matrix or a dense numpy array, which depends on the output
of the individual transformers and the `sparse_threshold` keyword.
Notes
-----
The order of the columns in the transformed feature matrix follows the
order of how the columns are specified in the `transformers` list.
Columns of the original feature matrix that are not specified are
dropped from the resulting transformed feature matrix, unless specified
in the `passthrough` keyword. Those columns specified with `passthrough`
are added at the right to the output of the transformers.
See Also
--------
make_column_transformer : Convenience function for
combining the outputs of multiple transformer objects applied to
column subsets of the original feature space.
make_column_selector : Convenience function for selecting
columns based on datatype or the columns name with a regex pattern.
Examples
--------
>>> import numpy as np
>>> from sklearn.compose import ColumnTransformer
>>> from sklearn.preprocessing import Normalizer
>>> ct = ColumnTransformer(
... [("norm1", Normalizer(norm='l1'), [0, 1]),
... ("norm2", Normalizer(norm='l1'), slice(2, 4))])
>>> X = np.array([[0., 1., 2., 2.],
... [1., 1., 0., 1.]])
>>> # Normalizer scales each row of X to unit norm. A separate scaling
>>> # is applied for the two first and two last elements of each
>>> # row independently.
>>> ct.fit_transform(X)
array([[0. , 1. , 0.5, 0.5],
[0.5, 0.5, 0. , 1. ]])
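    >>> # An added, illustrative sketch (not from the original docstring):
    >>> # 'passthrough' and 'drop' can be mixed to keep or discard subsets.
    >>> ct2 = ColumnTransformer(
    ...     [("keep", "passthrough", [0, 1]), ("discard", "drop", [2, 3])])
    >>> X_kept = ct2.fit_transform(X)  # only the first two columns remain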
"""
_required_parameters = ['transformers']
@_deprecate_positional_args
def __init__(self,
transformers, *,
remainder='drop',
sparse_threshold=0.3,
n_jobs=None,
transformer_weights=None,
verbose=False):
self.transformers = transformers
self.remainder = remainder
self.sparse_threshold = sparse_threshold
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
self.verbose = verbose
@property
def _transformers(self):
"""
Internal list of transformer only containing the name and
transformers, dropping the columns. This is for the implementation
of get_params via BaseComposition._get_params which expects lists
of tuples of len 2.
"""
return [(name, trans) for name, trans, _ in self.transformers]
@_transformers.setter
def _transformers(self, value):
self.transformers = [
(name, trans, col) for ((name, trans), (_, _, col))
in zip(value, self.transformers)]
def get_params(self, deep=True):
"""Get parameters for this estimator.
Returns the parameters given in the constructor as well as the
estimators contained within the `transformers` of the
`ColumnTransformer`.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
return self._get_params('_transformers', deep=deep)
def set_params(self, **kwargs):
"""Set the parameters of this estimator.
Valid parameter keys can be listed with ``get_params()``. Note that you
can directly set the parameters of the estimators contained in
`transformers` of `ColumnTransformer`.
Returns
-------
self
"""
self._set_params('_transformers', **kwargs)
return self
def _iter(self, fitted=False, replace_strings=False):
"""
Generate (name, trans, column, weight) tuples.
If fitted=True, use the fitted transformers, else use the
user specified transformers updated with converted column names
and potentially appended with transformer for remainder.
"""
if fitted:
transformers = self.transformers_
else:
# interleave the validated column specifiers
transformers = [
(name, trans, column) for (name, trans, _), column
in zip(self.transformers, self._columns)
]
# add transformer tuple for remainder
if self._remainder[2] is not None:
transformers = chain(transformers, [self._remainder])
get_weight = (self.transformer_weights or {}).get
for name, trans, column in transformers:
if replace_strings:
# replace 'passthrough' with identity transformer and
# skip in case of 'drop'
if trans == 'passthrough':
trans = FunctionTransformer(
accept_sparse=True, check_inverse=False
)
elif trans == 'drop':
continue
elif _is_empty_column_selection(column):
continue
yield (name, trans, column, get_weight(name))
def _validate_transformers(self):
if not self.transformers:
return
names, transformers, _ = zip(*self.transformers)
# validate names
self._validate_names(names)
# validate estimators
for t in transformers:
if t in ('drop', 'passthrough'):
continue
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All estimators should implement fit and "
"transform, or can be 'drop' or 'passthrough' "
"specifiers. '%s' (type %s) doesn't." %
(t, type(t)))
def _validate_column_callables(self, X):
"""
Converts callable column specifications.
"""
columns = []
for _, _, column in self.transformers:
if callable(column):
column = column(X)
columns.append(column)
self._columns = columns
def _validate_remainder(self, X):
"""
Validates ``remainder`` and defines ``_remainder`` targeting
the remaining columns.
"""
is_transformer = ((hasattr(self.remainder, "fit")
or hasattr(self.remainder, "fit_transform"))
and hasattr(self.remainder, "transform"))
if (self.remainder not in ('drop', 'passthrough')
and not is_transformer):
raise ValueError(
"The remainder keyword needs to be one of 'drop', "
"'passthrough', or estimator. '%s' was passed instead" %
self.remainder)
self._n_features = X.shape[1]
cols = []
for columns in self._columns:
cols.extend(_get_column_indices(X, columns))
remaining_idx = sorted(set(range(self._n_features)) - set(cols))
self._remainder = ('remainder', self.remainder, remaining_idx or None)
@property
def named_transformers_(self):
"""Access the fitted transformer by name.
Read-only attribute to access any transformer by given name.
Keys are transformer names and values are the fitted transformer
objects.
"""
# Use Bunch object to improve autocomplete
return Bunch(**{name: trans for name, trans, _
in self.transformers_})
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
check_is_fitted(self)
feature_names = []
for name, trans, column, _ in self._iter(fitted=True):
if trans == 'drop' or _is_empty_column_selection(column):
continue
if trans == 'passthrough':
if self._feature_names_in is not None:
if ((not isinstance(column, slice))
and all(isinstance(col, str) for col in column)):
feature_names.extend(column)
else:
feature_names.extend(self._feature_names_in[column])
else:
indices = np.arange(self._n_features)
feature_names.extend(['x%d' % i for i in indices[column]])
continue
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s (type %s) does not "
"provide get_feature_names."
% (str(name), type(trans).__name__))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def _update_fitted_transformers(self, transformers):
# transformers are fitted; excludes 'drop' cases
fitted_transformers = iter(transformers)
transformers_ = []
for name, old, column, _ in self._iter():
if old == 'drop':
trans = 'drop'
elif old == 'passthrough':
# FunctionTransformer is present in list of transformers,
# so get next transformer, but save original string
next(fitted_transformers)
trans = 'passthrough'
elif _is_empty_column_selection(column):
trans = old
else:
trans = next(fitted_transformers)
transformers_.append((name, trans, column))
# sanity check that transformers is exhausted
assert not list(fitted_transformers)
self.transformers_ = transformers_
def _validate_output(self, result):
"""
Ensure that the output of each transformer is 2D. Otherwise
hstack can raise an error or produce incorrect results.
"""
names = [name for name, _, _, _ in self._iter(fitted=True,
replace_strings=True)]
for Xs, name in zip(result, names):
if not getattr(Xs, 'ndim', 0) == 2:
raise ValueError(
"The output of the '{0}' transformer should be 2D (scipy "
"matrix, array, or pandas DataFrame).".format(name))
def _log_message(self, name, idx, total):
if not self.verbose:
return None
return '(%d of %d) Processing %s' % (idx, total, name)
def _fit_transform(self, X, y, func, fitted=False):
"""
Private function to fit and/or transform on demand.
Return value (transformers and/or transformed X data) depends
on the passed function.
``fitted=True`` ensures the fitted transformers are used.
"""
transformers = list(
self._iter(fitted=fitted, replace_strings=True))
try:
return Parallel(n_jobs=self.n_jobs)(
delayed(func)(
transformer=clone(trans) if not fitted else trans,
X=_safe_indexing(X, column, axis=1),
y=y,
weight=weight,
message_clsname='ColumnTransformer',
message=self._log_message(name, idx, len(transformers)))
for idx, (name, trans, column, weight) in enumerate(
transformers, 1))
except ValueError as e:
if "Expected 2D array, got 1D array instead" in str(e):
raise ValueError(_ERR_MSG_1DCOLUMN) from e
else:
raise
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : {array-like, dataframe} of shape (n_samples, n_features)
Input data, of which specified subsets are used to fit the
transformers.
y : array-like of shape (n_samples,...), default=None
Targets for supervised learning.
Returns
-------
self : ColumnTransformer
This estimator
"""
# we use fit_transform to make sure to set sparse_output_ (for which we
# need the transformed data) to have consistent output type in predict
self.fit_transform(X, y=y)
return self
def fit_transform(self, X, y=None):
"""Fit all transformers, transform the data and concatenate results.
Parameters
----------
X : {array-like, dataframe} of shape (n_samples, n_features)
Input data, of which specified subsets are used to fit the
transformers.
y : array-like of shape (n_samples,), default=None
Targets for supervised learning.
Returns
-------
X_t : {array-like, sparse matrix} of \
shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers. If
any result is a sparse matrix, everything will be converted to
sparse matrices.
"""
# TODO: this should be `feature_names_in_` when we start having it
if hasattr(X, "columns"):
self._feature_names_in = np.asarray(X.columns)
else:
self._feature_names_in = None
X = _check_X(X)
# set n_features_in_ attribute
self._check_n_features(X, reset=True)
self._validate_transformers()
self._validate_column_callables(X)
self._validate_remainder(X)
result = self._fit_transform(X, y, _fit_transform_one)
if not result:
self._update_fitted_transformers([])
# All transformers are None
return np.zeros((X.shape[0], 0))
Xs, transformers = zip(*result)
# determine if concatenated output will be sparse or not
if any(sparse.issparse(X) for X in Xs):
nnz = sum(X.nnz if sparse.issparse(X) else X.size for X in Xs)
total = sum(X.shape[0] * X.shape[1] if sparse.issparse(X)
else X.size for X in Xs)
density = nnz / total
self.sparse_output_ = density < self.sparse_threshold
else:
self.sparse_output_ = False
self._update_fitted_transformers(transformers)
self._validate_output(Xs)
return self._hstack(list(Xs))
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : {array-like, dataframe} of shape (n_samples, n_features)
The data to be transformed by subset.
Returns
-------
X_t : {array-like, sparse matrix} of \
shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers. If
any result is a sparse matrix, everything will be converted to
sparse matrices.
"""
check_is_fitted(self)
X = _check_X(X)
if hasattr(X, "columns"):
X_feature_names = np.asarray(X.columns)
else:
X_feature_names = None
self._check_n_features(X, reset=False)
if (self._feature_names_in is not None and
X_feature_names is not None and
np.any(self._feature_names_in != X_feature_names)):
raise RuntimeError(
"Given feature/column names do not match the ones for the "
"data given during fit."
)
Xs = self._fit_transform(X, None, _transform_one, fitted=True)
self._validate_output(Xs)
if not Xs:
# All transformers are None
return np.zeros((X.shape[0], 0))
return self._hstack(list(Xs))
def _hstack(self, Xs):
"""Stacks Xs horizontally.
This allows subclasses to control the stacking behavior, while reusing
everything else from ColumnTransformer.
Parameters
----------
Xs : list of {array-like, sparse matrix, dataframe}
"""
if self.sparse_output_:
try:
# since all columns should be numeric before stacking them
# in a sparse matrix, `check_array` is used for the
# dtype conversion if necessary.
converted_Xs = [check_array(X,
accept_sparse=True,
force_all_finite=False)
for X in Xs]
except ValueError as e:
raise ValueError(
"For a sparse output, all columns should "
"be a numeric or convertible to a numeric."
) from e
return sparse.hstack(converted_Xs).tocsr()
else:
Xs = [f.toarray() if sparse.issparse(f) else f for f in Xs]
return np.hstack(Xs)
def _sk_visual_block_(self):
if isinstance(self.remainder, str) and self.remainder == 'drop':
transformers = self.transformers
elif hasattr(self, "_remainder"):
remainder_columns = self._remainder[2]
if self._feature_names_in is not None:
remainder_columns = (
self._feature_names_in[remainder_columns].tolist()
)
transformers = chain(self.transformers,
[('remainder', self.remainder,
remainder_columns)])
else:
transformers = chain(self.transformers,
[('remainder', self.remainder, '')])
names, transformers, name_details = zip(*transformers)
return _VisualBlock('parallel', transformers,
names=names, name_details=name_details)
def _check_X(X):
"""Use check_array only on lists and other non-array-likes / sparse"""
if hasattr(X, '__array__') or sparse.issparse(X):
return X
return check_array(X, force_all_finite='allow-nan', dtype=object)
def _is_empty_column_selection(column):
"""
Return True if the column selection is empty (empty list or all-False
boolean array).
"""
if hasattr(column, 'dtype') and np.issubdtype(column.dtype, np.bool_):
return not column.any()
elif hasattr(column, '__len__'):
return (len(column) == 0 or
all(isinstance(col, bool) for col in column)
and not any(column))
else:
return False
def _get_transformer_list(estimators):
"""
Construct (name, trans, column) tuples from list
"""
transformers, columns = zip(*estimators)
names, _ = zip(*_name_estimators(transformers))
transformer_list = list(zip(names, transformers, columns))
return transformer_list
def make_column_transformer(*transformers,
remainder='drop',
sparse_threshold=0.3,
n_jobs=None,
verbose=False):
"""Construct a ColumnTransformer from the given transformers.
This is a shorthand for the ColumnTransformer constructor; it does not
require, and does not permit, naming the transformers. Instead, they will
be given names automatically based on their types. It also does not allow
weighting with ``transformer_weights``.
Read more in the :ref:`User Guide <make_column_transformer>`.
Parameters
----------
*transformers : tuples
Tuples of the form (transformer, columns) specifying the
transformer objects to be applied to subsets of the data.
transformer : {'drop', 'passthrough'} or estimator
Estimator must support :term:`fit` and :term:`transform`.
Special-cased strings 'drop' and 'passthrough' are accepted as
well, to indicate to drop the columns or to pass them through
untransformed, respectively.
columns : str, array-like of str, int, array-like of int, slice, \
array-like of bool or callable
Indexes the data on its second axis. Integers are interpreted as
positional columns, while strings can reference DataFrame columns
by name. A scalar string or int should be used where
``transformer`` expects X to be a 1d array-like (vector),
otherwise a 2d array will be passed to the transformer.
A callable is passed the input data `X` and can return any of the
above. To select multiple columns by name or dtype, you can use
:obj:`make_column_selector`.
remainder : {'drop', 'passthrough'} or estimator, default='drop'
By default, only the specified columns in `transformers` are
transformed and combined in the output, and the non-specified
columns are dropped. (default of ``'drop'``).
By specifying ``remainder='passthrough'``, all remaining columns that
were not specified in `transformers` will be automatically passed
through. This subset of columns is concatenated with the output of
the transformers.
By setting ``remainder`` to be an estimator, the remaining
non-specified columns will use the ``remainder`` estimator. The
estimator must support :term:`fit` and :term:`transform`.
sparse_threshold : float, default=0.3
If the transformed output consists of a mix of sparse and dense data,
it will be stacked as a sparse matrix if the density is lower than this
value. Use ``sparse_threshold=0`` to always return dense.
When the transformed output consists of all sparse or all dense data,
the stacked result will be sparse or dense, respectively, and this
keyword will be ignored.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : bool, default=False
If True, the time elapsed while fitting each transformer will be
printed as it is completed.
Returns
-------
ct : ColumnTransformer
See Also
--------
ColumnTransformer : Class that allows combining the
outputs of multiple transformer objects used on column subsets
of the data into a single feature space.
Examples
--------
>>> from sklearn.preprocessing import StandardScaler, OneHotEncoder
>>> from sklearn.compose import make_column_transformer
>>> make_column_transformer(
... (StandardScaler(), ['numerical_column']),
... (OneHotEncoder(), ['categorical_column']))
ColumnTransformer(transformers=[('standardscaler', StandardScaler(...),
['numerical_column']),
('onehotencoder', OneHotEncoder(...),
['categorical_column'])])
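    >>> # An added, illustrative sketch (not from the original docstring):
    >>> # keyword arguments such as ``remainder`` are forwarded unchanged.
    >>> ct = make_column_transformer(
    ...     (StandardScaler(), ['numerical_column']),
    ...     remainder='passthrough')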
"""
# transformer_weights keyword is not passed through because the user
# would need to know the automatically generated names of the transformers
transformer_list = _get_transformer_list(transformers)
return ColumnTransformer(transformer_list, n_jobs=n_jobs,
remainder=remainder,
sparse_threshold=sparse_threshold,
verbose=verbose)
class make_column_selector:
"""Create a callable to select columns to be used with
:class:`ColumnTransformer`.
:func:`make_column_selector` can select columns based on datatype or the
columns name with a regex. When using multiple selection criteria, **all**
criteria must match for a column to be selected.
Parameters
----------
pattern : str, default=None
        Columns whose names contain this regex pattern will be included. If
        None, columns will not be filtered by name.
dtype_include : column dtype or list of column dtypes, default=None
A selection of dtypes to include. For more details, see
:meth:`pandas.DataFrame.select_dtypes`.
dtype_exclude : column dtype or list of column dtypes, default=None
A selection of dtypes to exclude. For more details, see
:meth:`pandas.DataFrame.select_dtypes`.
Returns
-------
selector : callable
Callable for column selection to be used by a
:class:`ColumnTransformer`.
See Also
--------
ColumnTransformer : Class that allows combining the
outputs of multiple transformer objects used on column subsets
of the data into a single feature space.
Examples
--------
>>> from sklearn.preprocessing import StandardScaler, OneHotEncoder
>>> from sklearn.compose import make_column_transformer
>>> from sklearn.compose import make_column_selector
    >>> import numpy as np
    >>> import pandas as pd  # doctest: +SKIP
>>> X = pd.DataFrame({'city': ['London', 'London', 'Paris', 'Sallisaw'],
... 'rating': [5, 3, 4, 5]}) # doctest: +SKIP
>>> ct = make_column_transformer(
... (StandardScaler(),
... make_column_selector(dtype_include=np.number)), # rating
... (OneHotEncoder(),
... make_column_selector(dtype_include=object))) # city
>>> ct.fit_transform(X) # doctest: +SKIP
array([[ 0.90453403, 1. , 0. , 0. ],
[-1.50755672, 1. , 0. , 0. ],
[-0.30151134, 0. , 1. , 0. ],
[ 0.90453403, 0. , 0. , 1. ]])
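    >>> # An added, illustrative sketch (not from the original docstring):
    >>> # columns can also be selected by a regex on their names.
    >>> name_selector = make_column_selector(pattern='rat')  # matches 'rating'
    >>> name_selector(X)  # doctest: +SKIP
    ['rating']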
"""
@_deprecate_positional_args
def __init__(self, pattern=None, *, dtype_include=None,
dtype_exclude=None):
self.pattern = pattern
self.dtype_include = dtype_include
self.dtype_exclude = dtype_exclude
def __call__(self, df):
"""Callable for column selection to be used by a
:class:`ColumnTransformer`.
Parameters
----------
df : dataframe of shape (n_features, n_samples)
DataFrame to select columns from.
"""
if not hasattr(df, 'iloc'):
raise ValueError("make_column_selector can only be applied to "
"pandas dataframes")
df_row = df.iloc[:1]
if self.dtype_include is not None or self.dtype_exclude is not None:
df_row = df_row.select_dtypes(include=self.dtype_include,
exclude=self.dtype_exclude)
cols = df_row.columns
if self.pattern is not None:
cols = cols[cols.str.contains(self.pattern, regex=True)]
return cols.tolist()
| bsd-3-clause |
cainiaocome/scikit-learn | sklearn/covariance/graph_lasso_.py | 127 | 25626 | """GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state, check_array
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
"""Evaluation of the graph-lasso objective function
the objective function is made of a shifted scaled version of the
normalized log-likelihood (i.e. its empirical mean over the samples) and a
penalisation term to promote sparsity
"""
p = precision_.shape[0]
cost = - 2. * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
cost += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return cost
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
def alpha_max(emp_cov):
"""Find the maximum alpha for which there are some non-zeros off-diagonal.
Parameters
----------
emp_cov : 2D array, (n_features, n_features)
The sample covariance matrix
Notes
-----
    This results from the bound for all the Lasso problems that are solved
in GraphLasso: each time, the row of cov corresponds to Xy. As the
bound for alpha is given by `max(abs(Xy))`, the result follows.
"""
A = np.copy(emp_cov)
A.flat[::A.shape[0] + 1] = 0
return np.max(np.abs(A))
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
enet_tol=1e-4, max_iter=100, verbose=False,
return_costs=False, eps=np.finfo(np.float64).eps,
return_n_iter=False):
"""l1-penalized covariance estimator
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphLasso, GraphLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
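    Examples
    --------
    A minimal, illustrative sketch on synthetic data (an editorial addition,
    not part of the original docstring):

    >>> import numpy as np
    >>> from sklearn.covariance import empirical_covariance, graph_lasso
    >>> X = np.random.RandomState(0).randn(60, 5)
    >>> emp_cov = empirical_covariance(X)
    >>> cov, prec = graph_lasso(emp_cov, alpha=0.2)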
"""
_, n_features = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = - 2. * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return emp_cov, precision_, (cost, d_gap), 0
else:
return emp_cov, precision_, (cost, d_gap)
else:
if return_n_iter:
return emp_cov, linalg.inv(emp_cov), 0
else:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
# As a trivial regularization (Tikhonov like), we scale down the
# off-diagonal coefficients of our starting point: This is needed, as
# in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows up. Besides, this takes a
    # conservative standpoint on the initial conditions, and it tends to
    # make the convergence go faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
    # The different l1 regression solvers have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
# be robust to the max_iter=0 edge case, see:
# https://github.com/scikit-learn/scikit-learn/issues/4134
d_gap = np.inf
for i in range(max_iter):
for idx in range(n_features):
sub_covariance = covariance_[indices != idx].T[indices != idx]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance, row, row,
max_iter, enet_tol, check_random_state(None), False)
else:
# Use LARS
_, _, coefs = lars_path(
sub_covariance, row, Xy=row, Gram=sub_covariance,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
method='lars', return_path=False)
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print(
'[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
warnings.warn('graph_lasso: did not converge after %i iteration:'
' dual gap: %.3e' % (max_iter, d_gap),
ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return covariance_, precision_, costs, i + 1
else:
return covariance_, precision_, costs
else:
if return_n_iter:
return covariance_, precision_, i + 1
else:
return covariance_, precision_
class GraphLasso(EmpiricalCovariance):
"""Sparse inverse covariance estimation with an l1-penalized estimator.
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
alpha : positive float, default 0.01
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
mode : {'cd', 'lars'}, default 'cd'
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, default 1e-4
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, default 100
The maximum number of iterations.
verbose : boolean, default False
If verbose is True, the objective function and dual gap are
plotted at each iteration.
assume_centered : boolean, default False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
n_iter_ : int
Number of iterations run.
See Also
--------
graph_lasso, GraphLassoCV
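    Examples
    --------
    A minimal, illustrative sketch on synthetic data (an editorial addition,
    not part of the original docstring):

    >>> import numpy as np
    >>> from sklearn.covariance import GraphLasso
    >>> X = np.random.RandomState(0).randn(100, 5)
    >>> model = GraphLasso(alpha=0.05).fit(X)
    >>> cov_shape = model.covariance_.shape  # (5, 5)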
"""
def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4,
max_iter=100, verbose=False, assume_centered=False):
self.alpha = alpha
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=self.verbose, return_n_iter=True)
return self
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : list of positive floats
The list of regularization parameters, decreasing order.
X_test : 2D array, shape (n_test_samples, n_features), optional
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : integer, optional
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
covariances_ : List of 2D ndarray, shape (n_features, n_features)
The estimated covariance matrices.
precisions_ : List of 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrices.
scores_ : List of float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
for alpha in alphas:
try:
# Capture the errors, and move on
covariance_, precision_ = graph_lasso(
emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose)
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graph_lasso_path] alpha: %.2e, score: %.2e'
% (alpha, this_score))
else:
print('[graph_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return covariances_, precisions_, scores_
return covariances_, precisions_
class GraphLassoCV(GraphLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
    alphas : integer, or list of positive floats, optional
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details.
n_refinements: strictly positive integer
The number of times the grid is refined. Not used if explicit
values of alphas are passed.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, defaults to
a 3-fold strategy
tol: positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter: integer, optional
Maximum number of iterations.
mode: {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where number of features is greater
than number of samples. Elsewhere prefer cd which is more numerically
stable.
n_jobs: int, optional
number of jobs to run in parallel (default 1).
verbose: boolean, optional
If verbose is True, the objective function and duality gap are
printed at each iteration.
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : numpy.ndarray, shape (n_features, n_features)
Estimated covariance matrix.
precision_ : numpy.ndarray, shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
alpha_ : float
Penalization parameter selected.
cv_alphas_ : list of float
All penalization parameters explored.
`grid_scores`: 2D numpy.ndarray (n_alphas, n_folds)
Log-likelihood score on left-out data across folds.
n_iter_ : int
Number of iterations run for the optimal alpha.
See Also
--------
graph_lasso, GraphLasso
Notes
-----
The search for the optimal penalization parameter (alpha) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
    One of the challenges faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of alpha then come out as missing values, but the optimum may
be close to these missing values.
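    Examples
    --------
    A minimal, illustrative sketch on synthetic data (an editorial addition,
    not part of the original docstring):

    >>> import numpy as np
    >>> from sklearn.covariance import GraphLassoCV
    >>> X = np.random.RandomState(0).randn(100, 5)
    >>> model = GraphLassoCV(alphas=4, n_refinements=2).fit(X)
    >>> chosen_alpha = model.alpha_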
"""
def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=1,
verbose=False, assume_centered=False):
self.alphas = alphas
self.n_refinements = n_refinements
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.cv = cv
self.n_jobs = n_jobs
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
"""Fits the GraphLasso covariance model to X.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
"""
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
cv = check_cv(self.cv, X, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
if isinstance(n_alphas, collections.Sequence):
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
n_alphas)[::-1]
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter('ignore', ConvergenceWarning)
# Compute the cross-validated loss on the current grid
                # NOTE: Warm-restarting graph_lasso_path has been tried, and
                # it did not provide any gain (same execution time with
                # or without).
this_path = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose
)(
delayed(graph_lasso_path)(
X[train], alphas=alphas,
X_test=X[test], mode=self.mode,
tol=self.tol, enet_tol=self.enet_tol,
max_iter=int(.1 * self.max_iter),
verbose=inner_verbose)
for train, test in cv)
            # A little dance to transform the list into what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
if this_score >= .1 / np.finfo(np.float64).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif (best_index == last_finite_idx
and not best_index == len(path) - 1):
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not isinstance(n_alphas, collections.Sequence):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
% (i + 1, n_refinements, time.time() - t0))
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
cv=cv, n_jobs=self.n_jobs,
verbose=inner_verbose))
self.grid_scores = np.array(grid_scores)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
self.cv_alphas_ = alphas
# Finally fit the model with the selected alpha
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=inner_verbose, return_n_iter=True)
return self
| bsd-3-clause |
amal029/kaggle | modi.py | 1 | 2782 | #!/usr/bin/env python3
import pandas as pd
import string
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.corpus import stopwords
def get_newline_index(s):
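    """Return the text of `s` from the first newline (or carriage return)
    onward, keeping only characters that are in string.printable."""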
j = 0
for i, c in enumerate(s):
if (c == '\n') or (c == '\r'):
j = i
break
return ''.join([i for i in s[j:]
if i in string.printable])
def sentiment_analysis(speech):
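    """Split `speech` into sentences and count how many are scored as
    positive, negative or neutral by VADER (polarity score >= 0.5).
    Returns a (pos, neg, neu) tuple of sentence counts."""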
# Do sentiment analysis for Modi's speeches over time
sid = SentimentIntensityAnalyzer()
sentences = speech.split('.')
pos, neg, neu = 0, 0, 0
for s in sentences:
ss = sid.polarity_scores(s)
if ss['pos'] >= 0.5:
pos += 1
elif ss['neg'] >= 0.5:
neg += 1
elif ss['neu'] >= 0.5:
neu += 1
return (pos, neg, neu)
def main(fileName):
df = pd.read_csv(fileName, encoding='iso-8859-1')
# Column names are Speech_text, month, year
df['Speech_text'] = [get_newline_index(t)
for t in df['Speech_text']]
# Tokenize words
speeches = df['Speech_text'] # Series of speeches
speeches_time = []
sentiment_time = []
for speech in speeches:
speech_tokens = nltk.word_tokenize(speech)
# Now remove the stopwords
speech_tokens = [w.lower() for w in speech_tokens
if w.lower() not in stopwords.words('english')]
speech_tokens = nltk.pos_tag(speech_tokens)
speech_tokens = [w for w, t in speech_tokens
if t == 'NN' or t == 'NNS']
# Now make a Histogram of the most used words in each speech
speech_dist = nltk.FreqDist(speech_tokens)
# put words in order across all speeches
# print(speech_dist.most_common(5))
speeches_time.append(speech_dist.most_common(10))
# Add the sentiment scores to the list
sentiment_time.append(sentiment_analysis(speech))
    df['pos_sentiment'], df['neg_sentiment'], df['neu_sentiment'] \
= zip(*sentiment_time)
# Flattened
speeches_time = [j for i in speeches_time
for j in i]
# Now count the number of words across time series
final_dict = {k: 0
for k in set([i for i, j in speeches_time])}
for k in final_dict.keys():
final_dict[k] = sum([c for w, c in speeches_time
if w == k])
most_talked = (sorted(list(final_dict.items()), key=lambda v: v[1],
reverse=True))[:10]
# Modi talks most about these 10 things over time
print('most talked about nouns: ', most_talked)
# Write out the csv
df.to_csv('/tmp/modi_result.csv')
if __name__ == '__main__':
main('./mann-ki-baat-speech-corpus.zip')
| mit |
tudo-astroparticlephysics/pydisteval | disteval/visualization/comparison_plotter/functions/plot_funcs.py | 1 | 11371 | from __future__ import unicode_literals
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ColorConverter
import matplotlib.transforms as transforms
from colorsys import rgb_to_hls, hls_to_rgb
from .calc_funcs import map_aggarwal_ratio, rescale_limit
from . import legend_entries as le
MAIN_ZORDER = 4
def modify_color(color,
d_saturation=0.,
d_lightness=0.):
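    """Return `color` shifted in HLS space: `d_lightness` is added to the
    lightness (clipped to [0, 0.9]) and `d_saturation` to the saturation
    (clipped to [0, 1])."""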
conv = ColorConverter()
if not isinstance(color, tuple):
rgb_color = conv.to_rgb(color)
else:
rgb_color = color
hls_color = rgb_to_hls(*rgb_color)
new_l = max(0, min(0.9, hls_color[1] + d_lightness))
new_s = max(0, min(1, hls_color[2] + d_saturation))
return hls_to_rgb(hls_color[0], new_l, new_s)
def plot_inf_marker(fig,
ax,
binning,
place_marker,
markeredgecolor='k',
markerfacecolor='none',
bot=True,
alpha=1.,
rel_marker_size=0.007):
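    """Draw small triangle markers at the bottom (or top) edge of the figure
    for every bin flagged in `place_marker`, typically used to mark values
    that fall outside the plotted (finite) range."""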
# compute marker size
pixel_width, pixel_height = fig.canvas.get_width_height()
markersize = pixel_height * rel_marker_size
# get coordinate transformation
trans = transforms.blended_transform_factory(
ax.transData, fig.transFigure)
bbox = ax.get_position()
if bot:
y0 = bbox.y0 + rel_marker_size
marker = 'v'
else:
y0 = bbox.y1 - rel_marker_size
marker = '^'
bin_center = (binning[1:] + binning[:-1]) / 2
for bin_i, place in zip(bin_center, place_marker):
if place:
ax.plot([bin_i, ], [y0, ],
transform=trans,
marker=marker,
markerfacecolor=markerfacecolor,
markeredgecolor=markeredgecolor,
markersize=markersize,
figure=fig,
linewidth=1.,
zorder=MAIN_ZORDER + 1,
alpha=alpha)
def plot_finite_marker(ax, x, y, facecolor, edgecolor, alpha):
ax.plot(x,
y,
ls='',
mew=1.,
marker='o',
markeredgecolor=edgecolor,
markerfacecolor=facecolor,
alpha=alpha,
ms='5',
zorder=MAIN_ZORDER + 1)
def plot_data_style(fig,
ax,
bin_edges,
y,
facecolor,
edgecolor,
alpha,
ms='5'):
zero_mask = y > 0
bin_mids = (bin_edges[1:] + bin_edges[:-1]) / 2.
plot_finite_marker(ax,
x=bin_mids[zero_mask],
y=y[zero_mask],
facecolor=facecolor,
edgecolor=edgecolor,
alpha=alpha)
plot_inf_marker(fig, ax,
bin_edges,
~zero_mask,
markerfacecolor=facecolor,
markeredgecolor=edgecolor,
alpha=alpha)
return le.DataObject(facecolor,
edgecolor,
facecolor,
edgecolor)
def plot_uncertainties(ax, bin_edges, uncert, color, cmap):
n_alpha = uncert.shape[1]
cmap = plt.get_cmap(cmap)
colors = cmap(np.linspace(0.1, 0.9, n_alpha))
legend_entries = []
legend_entries.append(le.UncertObject(colors, color))
for i, c in enumerate(colors[::-1]):
j = n_alpha - i - 1
lower_limit = uncert[:, j, 0]
upper_limit = uncert[:, j, 1]
mask = np.isfinite(lower_limit)
lower_limit[~mask] = 0.
mask = np.isfinite(upper_limit)
upper_limit[~mask] = 0.
plot_band(ax,
bin_edges,
lower_limit,
upper_limit,
c,
alpha=1.,
borders=False,
brighten=False,
zorder=MAIN_ZORDER)
for i, c in enumerate(colors):
legend_entries.append(le.UncertObject_single(c))
return legend_entries
def plot_band(ax,
bin_edges,
y_err_low,
y_err_high,
color,
alpha=0.5,
borders=1.,
brighten=True,
zorder=None):
if isinstance(borders, bool):
if borders:
border_lw = 0.3
plot_borders = True
else:
plot_borders = False
elif isinstance(borders, float):
border_lw = borders
plot_borders = True
else:
plot_borders = False
if zorder is None:
zorder = MAIN_ZORDER - 1
if brighten:
band_color = modify_color(color, 0, 0.4)
else:
band_color = color
alpha = min(1., max(0., alpha))
ax.fill_between(bin_edges,
np.append(y_err_low[0], y_err_low),
np.append(y_err_high[0], y_err_high),
step='pre',
color=band_color,
edgecolor=band_color,
linewidth=0.0,
alpha=alpha,
zorder=zorder - 1)
if plot_borders:
if brighten:
band_color = modify_color(color, 0, 0.2)
else:
band_color = color
plot_hist(ax,
bin_edges,
y_err_low,
color,
lw=border_lw,
alpha=1.0,
zorder=zorder)
plot_hist(ax,
bin_edges,
y_err_high,
color,
lw=border_lw,
alpha=1.0,
zorder=zorder)
# legend_obj = le.
legend_obj = None
return legend_obj
def plot_hist(ax,
bin_edges,
y,
color,
yerr=None,
lw=1.6,
alpha=1.0,
zorder=None):
if zorder is None:
zorder = MAIN_ZORDER
alpha = min(1., max(0., alpha))
bin_mids = (bin_edges[1:] + bin_edges[:-1]) / 2.
nan_mask = np.isfinite(y)
bin_mids_masked = bin_mids[nan_mask]
y_masked = y[nan_mask]
xerr_masked = (np.diff(bin_edges) / 2)[nan_mask]
if yerr is not None:
yerr_masked = yerr[nan_mask]
else:
yerr_masked = None
errorbar = ax.errorbar(x=bin_mids_masked,
y=y_masked,
ls='',
xerr=xerr_masked,
yerr=yerr_masked,
color=color,
markersize=0,
capsize=0,
lw=lw,
zorder=zorder,
label='Test')
return errorbar
def plot_line(ax,
bin_edges,
y,
color,
lw=1.6,
alpha=1.0,
zorder=None):
if zorder is None:
zorder = MAIN_ZORDER
alpha = min(1., max(0., alpha))
obj, = ax.plot(bin_edges,
np.append(y[0], y),
drawstyle='steps-pre',
lw=lw,
c=color,
label='test',
alpha=alpha,
zorder=zorder)
return obj
def plot_test_ratio_mapped(fig,
ax,
bin_edges,
ratio,
is_above,
facecolor,
edgecolor,
alpha):
bin_mids = (bin_edges[1:] + bin_edges[:-1]) / 2.
is_finite = np.isfinite(ratio)
finite_mask_upper = np.logical_and(is_finite, is_above)
finite_mask_lower = np.logical_and(is_finite, ~is_above)
plot_finite_marker(ax,
x=bin_mids[finite_mask_upper],
y=ratio[finite_mask_upper],
facecolor=facecolor,
edgecolor=edgecolor,
alpha=alpha)
plot_finite_marker(ax,
x=bin_mids[finite_mask_lower],
y=ratio[finite_mask_lower],
facecolor=facecolor,
edgecolor=edgecolor,
alpha=alpha)
oor_mask_upper = np.logical_and(is_above, np.isposinf(ratio))
no_ratio_mask_upper = np.logical_and(is_above, np.isneginf(ratio))
plot_inf_marker(fig,
ax,
bin_edges,
oor_mask_upper,
markerfacecolor=facecolor,
markeredgecolor=edgecolor,
bot=False)
plot_inf_marker(fig,
ax,
bin_edges,
no_ratio_mask_upper,
markerfacecolor=facecolor,
markeredgecolor=edgecolor,
bot=False,
alpha=0.5)
oor_mask_lower = np.logical_and(~is_above, np.isposinf(ratio))
no_ratio_mask_lower = np.logical_and(~is_above, np.isneginf(ratio))
plot_inf_marker(fig,
ax,
bin_edges,
oor_mask_lower,
markerfacecolor=facecolor,
markeredgecolor=edgecolor,
bot=True)
plot_inf_marker(fig,
ax,
bin_edges,
no_ratio_mask_lower,
markerfacecolor=facecolor,
markeredgecolor=edgecolor,
bot=True,
alpha=0.5)
def generate_ticks_for_aggarwal_ratio(y_0, y_min, max_ticks_per_side=5):
y_min_floored = np.floor(y_min)
y_0_log = np.log10(y_0)
tick_pos = []
n_ticks = 1
tick_pos.append(y_0_log)
if y_0_log != np.floor(y_0_log):
tick_pos.append(np.floor(y_0_log))
n_ticks += 2
while tick_pos[-1] > y_min_floored:
tick_pos.append(tick_pos[-1] - 1)
n_ticks += 2
n_ticks_per_side = (n_ticks - 1) / 2
mayor_step_size = np.ceil(n_ticks_per_side / max_ticks_per_side)
tick_pos_mapped, y_min_ticks = map_aggarwal_ratio(np.power(10, tick_pos),
y_0=1.)
tick_pos_mapped = rescale_limit(tick_pos_mapped,
y_min_ticks,
y_min)
mayor_ticks = []
mayor_ticks_labels = []
minor_ticks = []
minor_ticks_labels = []
mayor_tick_counter = 0
for i, [p, l] in enumerate(zip(tick_pos_mapped, tick_pos)):
lab = r'$10^{{\mathrm{{{:.0f}}}}}$'.format(l)
if i == 0:
mayor_ticks_labels.append(lab)
mayor_ticks.append(0)
else:
if mayor_tick_counter == mayor_step_size:
mayor_ticks.extend([p * -1, p])
mayor_ticks_labels.extend([lab, lab])
mayor_tick_counter = 0
else:
minor_ticks.extend([p * -1, p])
minor_ticks_labels.extend([lab, lab])
mayor_tick_counter += 1
return mayor_ticks_labels, mayor_ticks, minor_ticks_labels, minor_ticks
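# Illustrative sketch only: it shows how the generated tick positions and
# labels might be attached to the ratio axis. The particular ``y_0``/``y_min``
# values are assumptions; ``y_min`` is expected on the same (log-like) scale
# the function uses internally.
def _example_aggarwal_ratio_ticks(ax, y_0=1.0, y_min=-3.0):
    major_labels, major_ticks, minor_labels, minor_ticks = \
        generate_ticks_for_aggarwal_ratio(y_0, y_min)
    ax.set_yticks(major_ticks)
    ax.set_yticklabels(major_labels)
    ax.set_yticks(minor_ticks, minor=True)
    ax.set_yticklabels(minor_labels, minor=True)
    return ax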
| mit |
ElDeveloper/scikit-learn | sklearn/cluster/dbscan_.py | 7 | 11611 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <[email protected]>
# Joel Nothman <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..metrics import pairwise_distances
from ..utils import check_array, check_consistent_length
from ..utils.fixes import astype
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski',
algorithm='auto', leaf_size=30, p=2, sample_weight=None):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
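    Examples
    --------
    A minimal, illustrative example; the toy data and parameter values below
    are chosen only to show the call signature and the meaning of the output.
    >>> import numpy as np
    >>> X = np.array([[1, 2], [2, 2], [2, 3],
    ...               [8, 7], [8, 8], [25, 80]])
    >>> core_samples, labels = dbscan(X, eps=3, min_samples=2)
    >>> labels
    array([ 0,  0,  0,  1,  1, -1])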
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
X = check_array(X, accept_sparse='csr')
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
# Calculate neighborhood for all samples. This leaves the original point
# in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i. While true, this is useless information here.)
if metric == 'precomputed' and sparse.issparse(X):
neighborhoods = np.empty(X.shape[0], dtype=object)
X.sum_duplicates() # XXX: modifies X's internals in-place
X_mask = X.data <= eps
masked_indices = astype(X.indices, np.intp, copy=False)[X_mask]
masked_indptr = np.cumsum(X_mask)[X.indptr[1:] - 1]
# insert the diagonal: a point is its own neighbor, but 0 distance
# means absence from sparse matrix data
masked_indices = np.insert(masked_indices, masked_indptr,
np.arange(X.shape[0]))
masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
# split into rows
neighborhoods[:] = np.split(masked_indices, masked_indptr)
else:
neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, eps,
return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
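    Examples
    --------
    A minimal, illustrative example mirroring the function above; the toy
    data and parameters are chosen only for demonstration.
    >>> import numpy as np
    >>> X = np.array([[1, 2], [2, 2], [2, 3],
    ...               [8, 7], [8, 8], [25, 80]])
    >>> DBSCAN(eps=3, min_samples=2).fit_predict(X)
    array([ 0,  0,  0,  1,  1, -1])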
"""
def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
algorithm='auto', leaf_size=30, p=None):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
"""
X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight, **self.get_params())
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
| bsd-3-clause |
lpryszcz/HiCembler | .dna-triangulation/orienting.py | 1 | 9214 | # Main script for orienting
# Modules in orienting_mods.py which could be moved to triangulation.py later
# Example:
# python orienting.py -in Dixon12_hESC-AllpathsLGcontigs.tab -out results -pos contigs_pos.tab -real_ori contig_orientations.tab
########################################################################################################################
import orienting_mods
import triangulation
import numpy as np
import sys
import argparse
import matplotlib.pyplot as plt
def main():
parser = argparse.ArgumentParser(description = 'Orient contigs within chromosome given interaction matrix.', formatter_class = argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-in', help = 'interaction frequency matrix file', dest = 'in_file', type = str, required = True)
parser.add_argument('-out', help = 'out file prefix', dest = 'out_file', type = str, required = True)
parser.add_argument('-pos', help = 'file with contig positions. "contig\tstart\tend"', dest = 'pos_file', type = str, required = True)
parser.add_argument('-real_ori', help = 'file with real orientations. "contig\tsign"', dest = 'real_ori_file', type = str, default = None)
args = parser.parse_args()
in_file = args.in_file
out_file = args.out_file
pos_file = args.pos_file
real_ori_file = args.real_ori_file
# Read contig interacion file
d, bin_chr, bin_position = triangulation.load_data_txt(in_file, remove_nans = True)
# Read contig pos file into dictionary
ID_col = 0
start_col = 1
end_col = 2
IDs = []
starts = []
ends = []
pos_fh = open(pos_file, 'r')
for line in pos_fh:
contig_line = line.split()
IDs.append(contig_line[ID_col])
starts.append(float(contig_line[start_col]))
ends.append(float(contig_line[end_col]))
pos_fh.close()
# Create position dictionary for downstream analysis
pos_dic = orienting_mods.make_pos_dic(IDs, starts, ends)
# Sort contigs by their positions
sorted_contigs_extra = orienting_mods.sort_by_pos(IDs, starts)
# Use only contigs that are in interaction matrix
sorted_contigs = []
for contig in sorted_contigs_extra:
if contig in bin_chr:
sorted_contigs.append(contig)
# Calculate bin centers
bin_center = np.mean(bin_position, axis = 1)
# Calculate the 4 orientation scores (edge wights) between each pair of contigs
# Return the weighted directed acyclic graph object
WDAG = orienting_mods.make_WDAG(d, bin_chr, bin_position, bin_center, sorted_contigs)
# Create sorted node list for input into shortest_path function
node_list = orienting_mods.sorted_nodes(sorted_contigs)
# Find shortest path through WDAG
orientation_results = orienting_mods.shortest_path(WDAG, node_list)
# Create output file for predicted orientations
OUT = open(out_file + '_pred_ori.txt', 'w+')
# Remove start and end node from orientation result list
orientation_results.remove("start")
orientation_results.remove("end")
# Format output results (Note contigs with single-bins default to forward)
for contig in orientation_results:
contig_ID = contig[:-3]
orientation = contig[-2:]
if orientation == "fw":
orientation = "+"
elif orientation == "rc":
orientation = "-"
else:
print "Error in formatting output!"
OUT.write(contig_ID + "\t" + orientation + "\n")
OUT.close()
if real_ori_file != None:
# Open true orientation data to test results against
true_fh = open(real_ori_file, 'r')
ID_col = 0
orient_col = 1
true_dic = {}
for line in true_fh:
contig_line = line.split()
contig_ID = contig_line[ID_col]
orientation = contig_line[orient_col]
true_dic[contig_ID] = orientation
true_fh.close()
        # Record accuracy of prediction at different confidence thresholds
# Get max confidence
max_conf = orienting_mods.get_max_conf(WDAG, sorted_contigs)
thresholds = np.arange(0.0, max_conf, max_conf/200.0)
accuracy_list = []
# Record percent of contigs removed
percent_removed = []
for threshold in thresholds:
poor_conf = orienting_mods.poor_confidence(WDAG, sorted_contigs, threshold)
percent_removed.append(float(len(poor_conf))/float(len(sorted_contigs)))
# Calculate sensitivity, specificity, and accuracy such that fw is (+) and rc is (-)
# Accuracy will be percent of orientations correctly predicted out of total contig orientations
# Create prediction dictionary for orientation results
pred_dic = orienting_mods.make_pred_dic(orientation_results, poor_conf)
# Need to remove all contigs from true dictionary that are not in our prediction dictionary
adj_true_dic = orienting_mods.adjust_true_dic(true_dic, pred_dic)
# Calculate stats
P, N, TP, TN, accuracy = orienting_mods.calc_stats(adj_true_dic, pred_dic)
accuracy_list.append(accuracy)
# Plot results
y_bottom = min(accuracy_list + percent_removed)
fig, ax1 = plt.subplots()
ax1.plot(thresholds, accuracy_list)
ax1.set_xlabel("Confidence threshold")
ax1.set_title("Accuracy vs Confidence")
ax1.set_ylim(y_bottom-0.1, 1.0)
ax1.set_ylabel("Accuracy", color = 'b')
for t1 in ax1.get_yticklabels():
t1.set_color('b')
ax2 = ax1.twinx()
ax2.plot(thresholds, percent_removed, 'r-')
ax2.set_ylabel("Percent contigs removed", color = 'r')
ax2.ticklabel_format(style = 'sci', axis = 'x', scilimits = (0, 0))
ax2.set_ylim(y_bottom-0.1, 1.0)
for t1 in ax2.get_yticklabels():
t1.set_color('r')
plt.savefig(out_file + '_acc_conf_plot.png')
# Record accuracy of prediction at different contig size thresholds
# Get max contig length of all contigs with positions
max_length = orienting_mods.get_max_length(bin_chr, bin_position, sorted_contigs)
contig_lengths = np.arange(0.0, max_length, max_length/200.0)
accuracy_list = []
percent_removed = []
for contig_length in contig_lengths:
            # Get all contigs with length <= the length threshold
small_contigs = orienting_mods.get_small_contigs(bin_chr, bin_position, sorted_contigs, contig_length)
# Add all single bin/score zero contigs to list of contigs to be removed
score_zeros = orienting_mods.poor_confidence(WDAG, sorted_contigs, 0.0)
remove_contigs = list(set(small_contigs).union(set(score_zeros)))
percent_removed.append(float(len(remove_contigs))/float(len(sorted_contigs)))
pred_dic = orienting_mods.make_pred_dic(orientation_results, remove_contigs)
# Need to remove all contigs from true dictionary that are not in our prediction dictionary
adj_true_dic = orienting_mods.adjust_true_dic(true_dic, pred_dic)
# Calculate stats
P, N, TP, TN, accuracy = orienting_mods.calc_stats(adj_true_dic, pred_dic)
accuracy_list.append(accuracy)
# Plot results
y_bottom = min(accuracy_list + percent_removed)
fig, ax1 = plt.subplots()
ax1.plot(contig_lengths, accuracy_list)
ax1.set_xlabel("Contig length threshold")
ax1.set_title("Accuracy vs Contig Length")
ax1.set_ylim(y_bottom-0.1, 1.0)
ax1.set_ylabel("Accuracy", color = 'b')
for t1 in ax1.get_yticklabels():
t1.set_color('b')
ax2 = ax1.twinx()
ax2.plot(contig_lengths, percent_removed, 'r-')
ax2.set_ylabel("Percent contigs removed", color = 'r')
ax2.ticklabel_format(style = 'sci', axis = 'x', scilimits = (0, 0))
ax2.set_ylim(y_bottom-0.1, 1.0)
for t1 in ax2.get_yticklabels():
t1.set_color('r')
plt.savefig(out_file + '_acc_size_plot.png')
# Record accuracy of prediction at different gap size thresholds
# Get max gap size between all contigs and min gap size between all contigs
max_gap, min_gap = orienting_mods.get_max_min_gap(sorted_contigs, pos_dic)
gap_lengths = np.arange(max_gap, min_gap, -max_gap/200.0)
accuracy_list = []
percent_removed = []
for gap_length in gap_lengths:
            # Get all contigs with gap size >= the gap threshold
big_gaps = orienting_mods.get_big_gaps(pos_dic, sorted_contigs, gap_length)
remove_contigs = list(set(big_gaps).union(set(score_zeros)))
percent_removed.append(float(len(remove_contigs))/float(len(sorted_contigs)))
pred_dic = orienting_mods.make_pred_dic(orientation_results, remove_contigs)
adj_true_dic = orienting_mods.adjust_true_dic(true_dic, pred_dic)
# Calculate stats
P, N, TP, TN, accuracy = orienting_mods.calc_stats(adj_true_dic, pred_dic)
accuracy_list.append(accuracy)
# Plot results
y_bottom = min(accuracy_list + percent_removed)
fig, ax1 = plt.subplots()
ax1.plot(gap_lengths, accuracy_list)
ax1.set_xlabel("Gap length threshold")
ax1.set_title("Accuracy vs Gap Length")
ax1.set_ylim(y_bottom-0.1, 1.0)
ax1.set_ylabel("Accuracy", color = 'b')
for t1 in ax1.get_yticklabels():
t1.set_color('b')
ax2 = ax1.twinx()
ax2.plot(gap_lengths, percent_removed, 'r-')
ax2.set_ylabel("Percent contigs removed", color = 'r')
ax2.ticklabel_format(style = 'sci', axis = 'x', scilimits = (0, 0))
ax2.set_ylim(y_bottom-0.1, 1.0)
ax2.invert_xaxis()
for t1 in ax2.get_yticklabels():
t1.set_color('r')
plt.savefig(out_file + '_acc_gaps_plot.png')
if __name__ == "__main__":
main()
| gpl-3.0 |
lnhubbell/tweetTrack | streamScript/domain/create_classifier.py | 1 | 6595 | import numpy as np
from sklearn.feature_extraction.text import CountVectorizer as CV
from sklearn.naive_bayes import MultinomialNB as MNB
from sklearn.cross_validation import cross_val_score
from streamScript.domain.query_db import query_all_db, query_all_db_Tweet200
from streamScript.domain.query_db import read_in_bb_file
import picklers
u"""You should interact with this file through the 'if name == main' block.
Edit the kwargs you want in order to create and pickle a classifier. This
classifier will be used to make your predictions."""
def check_city_locations(location_lat, location_lng):
    u"""Takes in latitude and longitude; returns the name of the city whose
    bounding box contains the given point, or None if no city matches."""
bb_dict = read_in_bb_file()
for city, values in bb_dict.items():
lats = values[0]
lngs = values[1]
if (float(lats[0]) < float(location_lat) < float(lats[1])) and \
(float(lngs[0]) < float(location_lng) < float(lngs[1])):
return city
def get_most_common_city(user_city):
u"""Takes in a dictionary of city names; returns the most frequently
occurring city in the dict."""
top = None
top_num = 0
for city, count in user_city.items():
if count > top_num:
top_num = count
top = city
return top
def build_test_matrix(history, vocab, testing):
    u"""Takes in a single user's tweet history (a list of tuples), a vocab
    list, and a testing flag. Returns the user's feature matrix X, the user
    name, and the city label inferred for that user."""
matrix = []
user_string = ""
user_city = {}
user_name = history[0][0]
for tweet in history:
if history[0][0] == user_name:
user_string += tweet[1].lower()
if history[0][2] and history[0][3] and not testing:
actual = check_city_locations(history[0][2], history[0][3])
if actual in user_city:
user_city[actual] += 1
else:
user_city[actual] = 1
matrix.append(user_string)
if user_city:
ret_user_city = get_most_common_city(user_city)
else:
ret_user_city = history[0][5]
vec = CV(
analyzer='word',
vocabulary=vocab
)
print "Building test X, Y..."
X = vec.fit_transform(matrix, vocab).toarray()
return X, user_name, ret_user_city
def vectorize(user_matrix, user_array, n):
stopwords = open('text/stopwords.txt').read().lower().split()
vec = CV(
analyzer='word',
stop_words=stopwords,
max_features=n,
)
print "Building X, Y..."
X = vec.fit_transform(user_matrix).toarray()
Y = np.array(user_array)
return X, Y, vec.get_feature_names()
def build_matrix(data, n=10000):
u"""Uses blocks of tweets from multiple users per city.
Takes in a raw dataset and an optional parameter to limit the feature
set to n. Defaults to 10000. Returns a tuple containing a matrix of n
features,
a vector of labels, and a vocabulary list of the features examined."""
user_matrix = []
user_array = []
tweet_count = 0
for key, val in data.items():
for tweet in val:
if not tweet_count % 200:
user_array.append(key)
user_matrix.append(" ")
user_matrix[-1] += tweet[2].lower()
tweet_count += 1
return user_matrix, user_array, n
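# Illustrative sketch only: it shows the assumed shape of ``data`` (a dict
# mapping a city label to tweet tuples whose third element is the tweet text)
# and how 400 fake tweets are grouped into two 200-tweet documents.
def _example_build_matrix():
    data = {'seattle': [(i, 'user_%d' % i, 'Sample tweet TEXT')
                        for i in range(400)]}
    user_matrix, user_array, n = build_matrix(data)
    # 400 tweets -> two blocks of 200, both labelled 'seattle'
    assert len(user_matrix) == 2 and user_array == ['seattle', 'seattle']
    return user_matrix, user_array, n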
def build_matrix_per_user(data, n=10000):
u""" Uses blocks of tweets from single users per city.
Takes in a raw dataset and an optional parameter to limit the feature
set to n. Defaults to 10000. Returns a tuple containing a matrix of n
features,
a vector of labels, and a vocabulary list of the features examined."""
user_matrix = []
user_array = []
for key, val in data.items():
user_list = []
count = 0
user_count = 0
# print key
# print len(val)
for tweet in val:
if user_count >= 100:
continue
if count == 0:
this_user = tweet[1]
our_string = ""
if (tweet[1] == this_user) and (count < 200):
our_string += tweet[2].lower()
count += 1
elif (tweet[1] != this_user):
count = 0
user_count += 1
user_matrix.append(our_string)
user_array.append(key)
user_list.append(this_user)
return user_matrix, user_array, n
def fit_classifier(X, y):
u"""Takes in an X matrix and a Y array of labels.
Fits classifier"""
mnb = MNB()
return mnb.fit(X, y)
def check_alphas(X, y):
u"""Takes in an X matrix and a Y array of labels.
Checks five possible alpha values; returns the
classifier with the highest cross-validated score."""
best = None
best_score = None
alphas = [1E-4, 1E-3, 1E-2, 1E-1, 1]
for alpha in alphas:
mnb = MNB(alpha)
score = np.mean(
cross_val_score(mnb, X, y, cv=10)
)
print "alpha: ", alpha, "score: ", score
if not best:
best = mnb
best_score = score
best_alpha = alpha
elif score > best_score:
best_score = score
best = mnb
best_alpha = alpha
best.fit(X, y)
print "our best score and our best alpha:"
print best_score, best_alpha
return best
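# A self-contained sketch of how ``check_alphas`` can be exercised with
# synthetic bag-of-words counts; the data below is made up for illustration
# and stands in for the output of ``vectorize``.
def _example_check_alphas():
    rng = np.random.RandomState(0)
    X = rng.randint(0, 5, size=(60, 20))   # 60 fake users, 20 token counts each
    y = np.array(['seattle'] * 30 + ['boston'] * 30)
    return check_alphas(X, y)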
def get_raw_classifier(
make_new_pickles=False, read_pickles=True, useTweet200=False
):
u"""Takes in keyword arguments to determine source of data. Returns a
trained classifier."""
if read_pickles:
X = picklers.load_pickle('matrix_pickle')
y = picklers.load_pickle('labels_pickle')
else:
if useTweet200:
data = query_all_db_Tweet200()
user_matrix, user_array, n = build_matrix_per_user(data)
else:
data = query_all_db(limit=True)
user_matrix, user_array, n = build_matrix(data)
X, y, vocab = vectorize(user_matrix, user_array, n)
mnb = check_alphas(X, y)
#mnb = fit_classifier(X, y)
picklers.write_pickle(mnb, 'classifier_pickle')
    if not read_pickles:
        # vocab is only defined when the features were just vectorized
        picklers.write_pickle(vocab, 'vocab_pickle')
if make_new_pickles:
        if not read_pickles:
            # raw data is only available when it was queried in this run
            picklers.write_pickle(data, 'pickle')
picklers.write_pickle(X, 'matrix_pickle')
picklers.write_pickle(y, 'labels_pickle')
print "returning mnb"
return mnb
if __name__ == "__main__":
print get_raw_classifier(
make_new_pickles=True, read_pickles=False, useTweet200=True
)
| mit |
GuessWhoSamFoo/pandas | pandas/tests/indexes/multi/test_get_set.py | 1 | 15757 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas.compat import range
import pandas as pd
from pandas import CategoricalIndex, Index, MultiIndex
import pandas.util.testing as tm
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
def test_get_level_number_integer(idx):
idx.names = [1, 0]
assert idx._get_level_number(1) == 0
assert idx._get_level_number(0) == 1
pytest.raises(IndexError, idx._get_level_number, 2)
with pytest.raises(KeyError, match='Level fourth not found'):
idx._get_level_number('fourth')
def test_get_level_values(idx):
result = idx.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = idx.get_level_values('first')
expected = idx.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
codes=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_value_duplicates():
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
codes=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_level_values_all_na():
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_int_with_na():
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na():
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_set_name_methods(idx, index_names):
# so long as these are synonyms, we don't need to test set_names
assert idx.rename == idx.set_names
new_names = [name + "SUFFIX" for name in index_names]
ind = idx.set_names(new_names)
assert idx.names == index_names
assert ind.names == new_names
with pytest.raises(ValueError, match="^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = idx.set_names(new_names[0], level=0)
assert idx.names == index_names
assert ind.names == [new_names[0], index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], index_names[1]]
# set names for multiple levels
ind = idx.set_names(new_names, level=[0, 1])
assert idx.names == index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
def test_set_levels_codes_directly(idx):
# setting levels/codes directly raises AttributeError
levels = idx.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
codes = idx.codes
major_codes, minor_codes = codes
major_codes = [(x + 1) % 3 for x in major_codes]
minor_codes = [(x + 1) % 1 for x in minor_codes]
new_codes = [major_codes, minor_codes]
with pytest.raises(AttributeError):
idx.levels = new_levels
with pytest.raises(AttributeError):
idx.codes = new_codes
def test_set_levels(idx):
# side note - you probably wouldn't want to use levels and codes
# directly like this - but it is possible.
levels = idx.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
# level changing [w/o mutation]
ind2 = idx.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(idx.levels, levels)
# level changing [w/ mutation]
ind2 = idx.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = idx.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(idx.levels, levels)
ind2 = idx.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(idx.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = idx.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(idx.levels, levels)
# level changing specific level [w/ mutation]
ind2 = idx.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(idx.levels, levels)
ind2 = idx.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(idx.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = idx.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(idx.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = idx.copy()
for inplace in [True, False]:
with pytest.raises(ValueError, match="^On"):
idx.set_levels(['c'], level=0, inplace=inplace)
assert_matching(idx.levels, original_index.levels,
check_dtype=True)
with pytest.raises(ValueError, match="^On"):
idx.set_codes([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(idx.codes, original_index.codes,
check_dtype=True)
with pytest.raises(TypeError, match="^Levels"):
idx.set_levels('c', level=0, inplace=inplace)
assert_matching(idx.levels, original_index.levels,
check_dtype=True)
with pytest.raises(TypeError, match="^Codes"):
idx.set_codes(1, level=0, inplace=inplace)
assert_matching(idx.codes, original_index.codes,
check_dtype=True)
def test_set_codes(idx):
# side note - you probably wouldn't want to use levels and codes
# directly like this - but it is possible.
codes = idx.codes
major_codes, minor_codes = codes
major_codes = [(x + 1) % 3 for x in major_codes]
minor_codes = [(x + 1) % 1 for x in minor_codes]
new_codes = [major_codes, minor_codes]
# changing codes w/o mutation
ind2 = idx.set_codes(new_codes)
assert_matching(ind2.codes, new_codes)
assert_matching(idx.codes, codes)
# changing label w/ mutation
ind2 = idx.copy()
inplace_return = ind2.set_codes(new_codes, inplace=True)
assert inplace_return is None
assert_matching(ind2.codes, new_codes)
# codes changing specific level w/o mutation
ind2 = idx.set_codes(new_codes[0], level=0)
assert_matching(ind2.codes, [new_codes[0], codes[1]])
assert_matching(idx.codes, codes)
ind2 = idx.set_codes(new_codes[1], level=1)
assert_matching(ind2.codes, [codes[0], new_codes[1]])
assert_matching(idx.codes, codes)
# codes changing multiple levels w/o mutation
ind2 = idx.set_codes(new_codes, level=[0, 1])
assert_matching(ind2.codes, new_codes)
assert_matching(idx.codes, codes)
# label changing specific level w/ mutation
ind2 = idx.copy()
inplace_return = ind2.set_codes(new_codes[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.codes, [new_codes[0], codes[1]])
assert_matching(idx.codes, codes)
ind2 = idx.copy()
inplace_return = ind2.set_codes(new_codes[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.codes, [codes[0], new_codes[1]])
assert_matching(idx.codes, codes)
# codes changing multiple levels [w/ mutation]
ind2 = idx.copy()
inplace_return = ind2.set_codes(new_codes, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.codes, new_codes)
assert_matching(idx.codes, codes)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_codes = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_codes])
# [w/o mutation]
result = ind.set_codes(codes=new_codes, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_codes(codes=new_codes, level=1, inplace=True)
assert result.equals(expected)
with tm.assert_produces_warning(FutureWarning):
ind.set_codes(labels=new_codes, level=1)
def test_set_labels_deprecated():
# GH23752
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
with tm.assert_produces_warning(FutureWarning):
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
with tm.assert_produces_warning(FutureWarning):
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_codes_names_bad_input(idx):
levels, codes = idx.levels, idx.codes
names = idx.names
with pytest.raises(ValueError, match='Length of levels'):
idx.set_levels([levels[0]])
with pytest.raises(ValueError, match='Length of codes'):
idx.set_codes([codes[0]])
with pytest.raises(ValueError, match='Length of names'):
idx.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with pytest.raises(TypeError, match='list of lists-like'):
idx.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with pytest.raises(TypeError, match='list of lists-like'):
idx.set_codes(codes[0])
# shouldn't scalar data error, instead should demand list-like
with pytest.raises(TypeError, match='list-like'):
idx.set_names(names[0])
# should have equal lengths
with pytest.raises(TypeError, match='list of lists-like'):
idx.set_levels(levels[0], level=[0, 1])
with pytest.raises(TypeError, match='list-like'):
idx.set_levels(levels, level=0)
# should have equal lengths
with pytest.raises(TypeError, match='list of lists-like'):
idx.set_codes(codes[0], level=[0, 1])
with pytest.raises(TypeError, match='list-like'):
idx.set_codes(codes, level=0)
# should have equal lengths
with pytest.raises(ValueError, match='Length of names'):
idx.set_names(names[0], level=[0, 1])
with pytest.raises(TypeError, match='Names must be a'):
idx.set_names(names, level=0)
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
codes=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('ordered', [True, False])
def test_set_levels_categorical(ordered):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
codes=index.codes)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_set_value_keeps_names():
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_set_levels_with_iterable():
# GH23273
sizes = [1, 2, 3]
colors = ['black'] * 3
index = pd.MultiIndex.from_arrays([sizes, colors], names=['size', 'color'])
result = index.set_levels(map(int, ['3', '2', '1']), level='size')
expected_sizes = [3, 2, 1]
expected = pd.MultiIndex.from_arrays([expected_sizes, colors],
names=['size', 'color'])
tm.assert_index_equal(result, expected)
| bsd-3-clause |
nmartensen/pandas | pandas/plotting/_compat.py | 11 | 1602 | # being a bit too dynamic
# pylint: disable=E1101
from __future__ import division
from distutils.version import LooseVersion
def _mpl_le_1_2_1():
try:
import matplotlib as mpl
return (str(mpl.__version__) <= LooseVersion('1.2.1') and
str(mpl.__version__)[0] != '0')
except ImportError:
return False
def _mpl_ge_1_3_1():
try:
import matplotlib
# The or v[0] == '0' is because their versioneer is
# messed up on dev
return (matplotlib.__version__ >= LooseVersion('1.3.1') or
matplotlib.__version__[0] == '0')
except ImportError:
return False
def _mpl_ge_1_4_0():
try:
import matplotlib
return (matplotlib.__version__ >= LooseVersion('1.4') or
matplotlib.__version__[0] == '0')
except ImportError:
return False
def _mpl_ge_1_5_0():
try:
import matplotlib
return (matplotlib.__version__ >= LooseVersion('1.5') or
matplotlib.__version__[0] == '0')
except ImportError:
return False
def _mpl_ge_2_0_0():
try:
import matplotlib
return matplotlib.__version__ >= LooseVersion('2.0')
except ImportError:
return False
def _mpl_le_2_0_0():
try:
import matplotlib
return matplotlib.compare_versions('2.0.0', matplotlib.__version__)
except ImportError:
return False
def _mpl_ge_2_0_1():
try:
import matplotlib
return matplotlib.__version__ >= LooseVersion('2.0.1')
except ImportError:
return False
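# Illustrative sketch of how these helpers are typically used to guard
# version-dependent matplotlib behaviour; the chosen style values are
# assumptions for demonstration only.
def _example_version_branch():
    if _mpl_ge_2_0_0():
        # matplotlib >= 2.0 understands the property-cycle color aliases
        return 'C0'
    return 'b'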
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_examples/pylab_examples/psd_demo_complex.py | 12 | 1166 | #This is a ported version of a MATLAB example from the signal processing
#toolbox that showed some difference at one time between Matplotlib's and
#MATLAB's scaling of the PSD. This differs from psd_demo3.py in that
#this uses a complex signal, so we can see that complex PSD's work properly
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
fs = 1000
t = np.linspace(0, 0.3, 301)
A = np.array([2, 8]).reshape(-1, 1)
f = np.array([150, 140]).reshape(-1, 1)
xn = (A * np.exp(2j * np.pi * f * t)).sum(axis=0) + 5 * np.random.randn(*t.shape)
yticks = np.arange(-50, 30, 10)
xticks = np.arange(-500,550,100)
plt.subplots_adjust(hspace=0.45, wspace=0.3)
ax = plt.subplot(1, 2, 1)
plt.psd(xn, NFFT=301, Fs=fs, window=mlab.window_none, pad_to=1024,
scale_by_freq=True)
plt.title('Periodogram')
plt.yticks(yticks)
plt.xticks(xticks)
plt.grid(True)
plt.xlim(-500, 500)
plt.subplot(1, 2, 2, sharex=ax, sharey=ax)
plt.psd(xn, NFFT=150, Fs=fs, window=mlab.window_none, noverlap=75, pad_to=512,
scale_by_freq=True)
plt.title('Welch')
plt.xticks(xticks)
plt.yticks(yticks)
plt.ylabel('')
plt.grid(True)
plt.xlim(-500, 500)
plt.show()
| gpl-2.0 |
restudToolbox/package | respy/tests/test_versions.py | 1 | 9657 | from pandas.util.testing import assert_frame_equal
import numpy as np
import pandas as pd
import pytest
from codes.auxiliary import write_interpolation_grid
from codes.random_init import generate_random_dict
from codes.auxiliary import compare_est_log
from codes.random_init import generate_init
from codes.auxiliary import write_draws
from respy.python.solve.solve_auxiliary import pyth_create_state_space
from respy.python.shared.shared_constants import TEST_RESOURCES_DIR
from respy.python.shared.shared_auxiliary import print_init_dict
from respy.python.shared.shared_constants import IS_FORTRAN
from respy import estimate
from respy import simulate
from respy import RespyCls
@pytest.mark.skipif(not IS_FORTRAN, reason='No FORTRAN available')
@pytest.mark.usefixtures('fresh_directory', 'set_seed')
class TestClass(object):
""" This class groups together some tests.
"""
def test_1(self):
""" Testing the equality of an evaluation of the criterion function for
a random request.
"""
# Run evaluation for multiple random requests.
is_deterministic = np.random.choice([True, False], p=[0.10, 0.9])
is_interpolated = np.random.choice([True, False], p=[0.10, 0.9])
is_myopic = np.random.choice([True, False], p=[0.10, 0.9])
max_draws = np.random.randint(10, 100)
# Generate random initialization file
constr = dict()
constr['is_deterministic'] = is_deterministic
constr['flag_parallelism'] = False
constr['is_myopic'] = is_myopic
constr['max_draws'] = max_draws
constr['maxfun'] = 0
init_dict = generate_random_dict(constr)
# The use of the interpolation routines is a another special case.
# Constructing a request that actually involves the use of the
# interpolation routine is a little involved as the number of
# interpolation points needs to be lower than the actual number of
# states. And to know the number of states each period, I need to
# construct the whole state space.
if is_interpolated:
# Extract from future initialization file the information
# required to construct the state space. The number of periods
# needs to be at least three in order to provide enough state
# points.
num_periods = np.random.randint(3, 6)
edu_start = init_dict['EDUCATION']['start']
edu_max = init_dict['EDUCATION']['max']
min_idx = min(num_periods, (edu_max - edu_start + 1))
max_states_period = pyth_create_state_space(num_periods, edu_start,
edu_max, min_idx)[3]
# Updates to initialization dictionary that trigger a use of the
# interpolation code.
init_dict['BASICS']['periods'] = num_periods
init_dict['INTERPOLATION']['flag'] = True
init_dict['INTERPOLATION']['points'] = \
np.random.randint(10, max_states_period)
# Print out the relevant initialization file.
print_init_dict(init_dict)
# Write out random components and interpolation grid to align the
# three implementations.
num_periods = init_dict['BASICS']['periods']
write_draws(num_periods, max_draws)
write_interpolation_grid('test.respy.ini')
# Clean evaluations based on interpolation grid,
base_val, base_data = None, None
for version in ['PYTHON', 'FORTRAN']:
respy_obj = RespyCls('test.respy.ini')
# Modify the version of the program for the different requests.
respy_obj.unlock()
respy_obj.set_attr('version', version)
respy_obj.lock()
# Solve the model
respy_obj = simulate(respy_obj)
# This parts checks the equality of simulated dataset for the
# different versions of the code.
data_frame = pd.read_csv('data.respy.dat', delim_whitespace=True)
if base_data is None:
base_data = data_frame.copy()
assert_frame_equal(base_data, data_frame)
# This part checks the equality of an evaluation of the
# criterion function.
_, crit_val = estimate(respy_obj)
if base_val is None:
base_val = crit_val
np.testing.assert_allclose(base_val, crit_val, rtol=1e-05,
atol=1e-06)
# We know even more for the deterministic case.
if constr['is_deterministic']:
assert (crit_val in [-1.0, 0.0])
def test_2(self):
""" This test ensures that the evaluation of the criterion function
at the starting value is identical between the different versions.
"""
max_draws = np.random.randint(10, 100)
# Generate random initialization file
constr = dict()
constr['flag_parallelism'] = False
constr['max_draws'] = max_draws
constr['flag_interpolation'] = False
constr['maxfun'] = 0
# Generate random initialization file
init_dict = generate_init(constr)
# Perform toolbox actions
respy_obj = RespyCls('test.respy.ini')
# Simulate a dataset
simulate(respy_obj)
# Iterate over alternative implementations
base_x, base_val = None, None
num_periods = init_dict['BASICS']['periods']
write_draws(num_periods, max_draws)
for version in ['FORTRAN', 'PYTHON']:
respy_obj.unlock()
respy_obj.set_attr('version', version)
respy_obj.lock()
x, val = estimate(respy_obj)
# Check for the returned parameters.
if base_x is None:
base_x = x
np.testing.assert_allclose(base_x, x)
# Check for the value of the criterion function.
if base_val is None:
base_val = val
np.testing.assert_allclose(base_val, val)
def test_3(self):
""" Test the solution of deterministic model with ambiguity and
interpolation. This test has the same result as in the absence of
random variation in payoffs, it does not matter whether the
environment is ambiguous or not.
"""
# Solve specified economy
for version in ['FORTRAN', 'PYTHON']:
respy_obj = RespyCls(TEST_RESOURCES_DIR + '/test_fifth.respy.ini')
respy_obj.unlock()
respy_obj.set_attr('version', version)
respy_obj.lock()
respy_obj = simulate(respy_obj)
# Assess expected future value
val = respy_obj.get_attr('periods_emax')[0, :1]
np.testing.assert_allclose(val, 88750)
# Assess evaluation
_, val = estimate(respy_obj)
np.testing.assert_allclose(val, -1.0)
def test_4(self):
""" Test the solution of deterministic model without ambiguity,
but with interpolation. As a deterministic model is requested,
all versions should yield the same result without any additional effort.
"""
# Solve specified economy
for version in ['FORTRAN', 'PYTHON']:
respy_obj = RespyCls(TEST_RESOURCES_DIR + '/test_fifth.respy.ini')
respy_obj.unlock()
respy_obj.set_attr('version', version)
respy_obj.lock()
respy_obj = simulate(respy_obj)
# Assess expected future value
val = respy_obj.get_attr('periods_emax')[0, :1]
np.testing.assert_allclose(val, 88750)
# Assess evaluation
_, val = estimate(respy_obj)
np.testing.assert_allclose(val, -1.0)
def test_5(self):
""" This test ensures that the logging looks exactly the same for the
different versions.
"""
max_draws = np.random.randint(10, 300)
# Generate random initialization file
constr = dict()
constr['flag_parallelism'] = False
constr['max_draws'] = max_draws
constr['flag_interpolation'] = False
constr['maxfun'] = 0
# Generate random initialization file
init_dict = generate_init(constr)
# Perform toolbox actions
respy_obj = RespyCls('test.respy.ini')
# Iterate over alternative implementations
base_sol_log, base_est_info_log, base_est_log = None, None, None
base_sim_log = None
num_periods = init_dict['BASICS']['periods']
write_draws(num_periods, max_draws)
for version in ['FORTRAN', 'PYTHON']:
respy_obj.unlock()
respy_obj.set_attr('version', version)
respy_obj.lock()
simulate(respy_obj)
estimate(respy_obj)
# Check for identical logging
if base_sol_log is None:
base_sol_log = open('sol.respy.log', 'r').read()
assert open('sol.respy.log', 'r').read() == base_sol_log
# Check for identical logging
if base_sim_log is None:
base_sim_log = open('sim.respy.log', 'r').read()
assert open('sim.respy.log', 'r').read() == base_sim_log
if base_est_info_log is None:
base_est_info_log = open('est.respy.info', 'r').read()
assert open('est.respy.info', 'r').read() == base_est_info_log
if base_est_log is None:
base_est_log = open('est.respy.log', 'r').readlines()
compare_est_log(base_est_log)
| mit |
jcrudy/py-earth | pyearth/export.py | 3 | 8244 | def export_python_function(earth_model):
"""
Exports model as a pure python function, with no numpy/scipy/sklearn dependencies.
:param earth_model: Trained pyearth model
    :return: A function that accepts an iterator over examples and returns a list of transformed examples
"""
i = 0
accessors = []
for bf in earth_model.basis_:
if not bf.is_pruned():
accessors.append(bf.func_factory(earth_model.coef_[0, i]))
i += 1
def func(example_iterator):
return [sum(accessor(row) for accessor in accessors) for row in example_iterator]
return func
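# A minimal usage sketch (illustrative only): fit a small Earth model on
# synthetic data, export it, and evaluate the exported pure-python function.
# The synthetic data and Earth parameters are assumptions for demonstration.
def _example_export_python_function():
    import numpy
    from pyearth import Earth
    numpy.random.seed(0)
    X = numpy.random.uniform(size=(100, 2))
    y = 2 * numpy.abs(X[:, 0] - 0.5) + numpy.random.normal(scale=0.05, size=100)
    model = Earth(max_degree=1).fit(X, y)
    predict = export_python_function(model)
    # The exported function only needs plain python iterables, not numpy.
    return predict(X.tolist())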
def export_python_string(earth_model, function_name="model"):
"""
Exports model as a string that evaluates as python code, with no numpy/scipy/sklearn dependencies.
:param earth_model: Trained pyearth model
:param function_name: string, optional, will be the name of the function in the returned string
:return: string, when executed (either by writing to a file, or using `exec`, will define a python
function that accepts an iterator over examples, and returns an iterator over transformed examples
"""
i = 0
accessors = []
for bf in earth_model.basis_:
if not bf.is_pruned():
accessors.append(bf.func_string_factory(earth_model.coef_[0, i]))
i += 1
return """def {:s}(example_iterator):
accessors = [{:s}]
for x in example_iterator:
yield sum(accessor(x) for accessor in accessors)
""".format(function_name, ",\n\t\t".join(accessors))
def export_sympy_term_expressions(earth_model):
"""
Construct a list of sympy expressions for all non-pruned terms in the model.
:param earth_model: Trained pyearth model
:return: a list of sympy expressions representing terms in the model. These
expressions are the symbolic equivalent of the Earth.transform method.
"""
from sympy import Symbol, Add, Mul, Max, RealNumber, Piecewise, Pow, And, nan, Function, Not
from ._basis import LinearBasisFunction, HingeBasisFunction, SmoothedHingeBasisFunction, \
MissingnessBasisFunction, ConstantBasisFunction, VariableBasisFunction
Missing = Function('Missing')
NaNProtect = Function('NaNProtect')
def linear_bf_to_factor(bf, bf_var):
return bf_var
def smoothed_hinge_bf_to_factor(bf, bf_var):
knot = RealNumber(bf.get_knot())
knot_minus = RealNumber(bf.get_knot_minus())
knot_plus = RealNumber(bf.get_knot_plus())
r = RealNumber(bf.get_r())
p = RealNumber(bf.get_p())
if bf.get_reverse():
lower_p = (-(bf_var - knot)), (bf_var <= knot_minus)
upper_p = (0, bf_var >= knot_plus)
left_exp = Mul(p, Pow((bf_var - knot_plus), 2))
right_exp = Mul(r, Pow((bf_var - knot_plus), 3))
middle_b = And(knot_minus < bf_var, bf_var < knot_plus)
middle_exp = (Add(left_exp, right_exp), middle_b)
piecewise = Piecewise(lower_p, upper_p, middle_exp)
factor = piecewise
else:
lower_p = (0, bf_var <= knot_minus)
upper_p = (bf_var - knot, bf_var >= knot_plus)
left_exp = Mul(p, Pow((bf_var - knot_minus), 2))
right_exp = Mul(r, Pow((bf_var - knot_minus), 3))
middle_b = And(knot_minus < bf_var, bf_var < knot_plus)
middle_exp = (Add(left_exp, right_exp), middle_b)
piecewise = Piecewise(lower_p, upper_p, middle_exp)
factor = piecewise
return factor
def hinge_bf_to_factor(bf, bf_var):
knot = bf.get_knot()
if bf.get_reverse():
factor = Max(0, RealNumber(knot) - bf_var)
else:
factor = Max(0, bf_var - RealNumber(knot))
return factor
def missingness_bf_to_factor(bf, bf_var):
        # Map a missingness basis function onto a symbolic Missing(...)
        # indicator; ``complement`` negates the indicator.
if bf.complement:
return Not(Missing(bf_var))
else:
return Missing(bf_var)
def constant_bf_to_factor(bf, bf_var):
return RealNumber(1)
def protect_from_nan(label, missables):
return NaNProtect(Symbol(label)) if label in missables else Symbol(label)
def dont_protect_from_nan(label, missables):
return Symbol(label)
bf_to_factor_dispatcher = {LinearBasisFunction: linear_bf_to_factor,
SmoothedHingeBasisFunction: smoothed_hinge_bf_to_factor,
HingeBasisFunction: hinge_bf_to_factor,
MissingnessBasisFunction: missingness_bf_to_factor,
ConstantBasisFunction: constant_bf_to_factor}
nan_protect_dispatch = {LinearBasisFunction: protect_from_nan,
SmoothedHingeBasisFunction: protect_from_nan,
HingeBasisFunction: protect_from_nan,
MissingnessBasisFunction: dont_protect_from_nan,
ConstantBasisFunction: protect_from_nan}
def bf_to_factor(bf, missables):
'''
Convert a BasisFunction to a factor of a term.
'''
if isinstance(bf, VariableBasisFunction):
bf_var = nan_protect_dispatch[bf.__class__](bf.label, missables)
else:
bf_var = None
return bf_to_factor_dispatcher[bf.__class__](bf, bf_var)
def missingness_bf_get_missables(bf):
bf_var = bf.label
return set([bf_var])
def non_missable(bf):
return set()
bf_get_missables_dispatcher = {LinearBasisFunction: non_missable,
SmoothedHingeBasisFunction: non_missable,
HingeBasisFunction: non_missable,
MissingnessBasisFunction: missingness_bf_get_missables,
ConstantBasisFunction: non_missable}
def get_missables(bf):
missables = bf_get_missables_dispatcher[bf.__class__](bf)
parent = bf.get_parent()
if parent is None:
return missables
else:
missables.update(get_missables(parent))
return missables
def bf_to_term(bf, missables):
'''
Convert a BasisFunction to a term (without coefficient).
'''
term = bf_to_factor(bf, missables)
parent = bf.get_parent()
if parent is None:
return term
else:
return Mul(term, bf_to_term(parent, missables))
return [bf_to_term(bf, get_missables(bf)) for bf in earth_model.basis_.piter()]
def export_sympy(earth_model, columns=None):
"""
Constructs a sympy expression or list of sympy expressions from of a trained earth model.
:param earth_model: Trained pyearth model
:param columns: The index or indices of the output columns for which expressions are to
be constructed. If an integer is used, a sympy expression is returned. If indices
are given then a list of sympy expressions is returned. If columns is None, it is treated
as if columns=0 for models with only one output column or as columns=slice(None) for more than
one output column.
:return: a sympy expression or list of sympy expressions equivalent to the Earth.predict method for
the selected output columns.
"""
# Set a sane default for columns
if columns is None:
if earth_model.coef_.shape[0] == 1:
columns = 0
else:
columns = slice(None)
# Get basis function terms
terms = export_sympy_term_expressions(earth_model)
# Handle column choice
coefs = earth_model.coef_[columns]
if len(coefs.shape) == 1:
unwrap = True
coefs = [coefs]
n_cols = 1
else:
unwrap = False
n_cols = coefs.shape[0]
# Combine coefficients with terms for each output column
result = [sum([coefs[i][j] * term for j, term in enumerate(terms)]) for i in range(n_cols)]
if unwrap:
# Result should be an expression rather than a list of expressions.
result = result[0]
return result
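# A minimal usage sketch (hypothetical variable names, assuming a fitted
# py-earth ``Earth`` model):
#
#     from pyearth import Earth
#     earth_model = Earth().fit(X, y)
#     expr = export_sympy(earth_model)                        # single output column
#     exprs = export_sympy(earth_model, columns=slice(None))  # list, one per column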
| bsd-3-clause |
msbeta/apollo | modules/tools/prediction/learning_algorithms/utilities/train_utils.py | 1 | 6620 | ###############################################################################
# Copyright 2019 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import logging
import math
import numpy as np
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.utils import class_weight
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader, sampler
def cuda(x):
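    # Recursively move a tensor (or any nested list/tuple of tensors) onto the GPU when one is available.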
#return x
if isinstance(x, (list, tuple)):
return [cuda(y) for y in x]
return x.cuda() if torch.cuda.is_available() else x
def train_vanilla(train_X, train_y, model, loss, optimizer, epoch,
batch_preprocess, batch_size=1024, print_period=100):
model.train()
loss_history = []
logging.info('Epoch: {}:'.format(epoch))
print('Epoch: {}:'.format(epoch))
num_of_data = train_X.shape[0]
num_of_batch = math.ceil(num_of_data / batch_size)
pred_y = []
for i in range(num_of_batch):
optimizer.zero_grad()
X = train_X[i*batch_size: min(num_of_data, (i+1)*batch_size), ]
y = train_y[i*batch_size: min(num_of_data, (i+1)*batch_size), ]
X, y = batch_preprocess(X, y)
pred = model(X)
train_loss = loss.loss_fn(pred, y)
loss_history.append(train_loss.item())
train_loss.backward()
optimizer.step()
pred = pred.detach().cpu().numpy()
pred_y.append(pred)
if (i > 0) and (i % print_period == 0):
logging.info(' Step: {}, training loss: {}'.format(
i, np.mean(loss_history[-print_period:])))
print (' Step: {}, training loss: {}'.format(
i, np.mean(loss_history[-print_period:])))
train_loss = np.mean(loss_history)
logging.info('Training loss: {}'.format(train_loss))
print('Training Loss: {}'.format(train_loss))
loss.loss_info(pred_y, train_y)
def valid_vanilla(valid_X, valid_y, model, loss, batch_preprocess,
batch_size=1024):
model.eval()
loss_history = []
num_of_data = valid_X.shape[0]
num_of_batch = math.ceil(num_of_data / batch_size)
pred_y = []
for i in range(num_of_batch):
X = valid_X[i*batch_size: min(num_of_data, (i+1)*batch_size), ]
y = valid_y[i*batch_size: min(num_of_data, (i+1)*batch_size), ]
X, y = batch_preprocess(X, y)
pred = model(X)
valid_loss = loss.loss_fn(pred, y)
loss_history.append(valid_loss.item())
pred = pred.detach().cpu().numpy()
pred_y.append(pred)
valid_loss = np.mean(loss_history)
logging.info('Validation loss: {}.'.format(valid_loss))
print ('Validation loss: {}.'.format(valid_loss))
loss.loss_info(pred_y, valid_y)
return valid_loss
def train_valid_vanilla(train_X, train_y, valid_X, valid_y, model, loss,
optimizer, scheduler, epochs, save_name, batch_preprocess,
train_batch=1024, print_period=100, valid_batch=1024):
best_valid_loss = float('+inf')
for epoch in range(1, epochs+1):
train_vanilla(train_X, train_y, model, loss, optimizer, epoch,
batch_preprocess, train_batch, print_period)
valid_loss = valid_vanilla(valid_X, valid_y, model, loss,
batch_preprocess, valid_batch)
scheduler.step(valid_loss)
# TODO(jiacheng): add early stopping mechanism
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
# save_checkpoint()
return model
def train_dataloader(train_loader, model, loss, optimizer, epoch, print_period=100):
with torch.autograd.set_detect_anomaly(True):
model.train()
loss_history = []
logging.info('Epoch: {}:'.format(epoch))
print('Epoch: {}:'.format(epoch))
for i, (X, y) in enumerate(train_loader):
optimizer.zero_grad()
X, y = cuda(X), cuda(y)
pred = model(X)
train_loss = loss.loss_fn(pred, y)
loss_history.append(train_loss.item())
train_loss.backward()
optimizer.step()
if (i > 0) and (i % print_period == 0):
logging.info(' Step: {}, training loss: {}'.format(
i, np.mean(loss_history[-print_period:])))
print (' Step: {}, training loss: {}'.format(
i, np.mean(loss_history[-print_period:])))
train_loss = np.mean(loss_history)
logging.info('Training loss: {}'.format(train_loss))
print('Training Loss: {}'.format(train_loss))
def valid_dataloader(valid_loader, model, loss):
model.eval()
loss_history = []
for i, (X, y) in enumerate(valid_loader):
X, y = cuda(X), cuda(y)
pred = model(X)
valid_loss = loss.loss_fn(pred, y)
loss_history.append(valid_loss.item())
valid_loss_info = loss.loss_info(pred, y).item()
print (valid_loss_info)
logging.info('Validation avg displacement = {}'.format(valid_loss_info))
valid_loss = np.mean(loss_history)
logging.info('Validation loss: {}.'.format(valid_loss))
print ('Validation loss: {}.'.format(valid_loss))
return valid_loss
def train_valid_dataloader(train_loader, valid_loader, model, loss, optimizer,
scheduler, epochs, save_name, print_period=100):
best_valid_loss = float('+inf')
for epoch in range(1, epochs+1):
train_dataloader(train_loader, model, loss, optimizer, epoch, print_period)
valid_loss = valid_dataloader(valid_loader, model, loss)
scheduler.step(valid_loss)
# TODO(jiacheng): add early stopping mechanism
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
# save_checkpoint()
return model
# TODO(jiacheng): implement this
def save_checkpoint():
return
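# A minimal wiring sketch (hypothetical ``MyModel``/``MyLoss`` classes and data
# loaders; the ``loss`` object only needs the ``loss_fn(pred, y)`` and
# ``loss_info(pred, y)`` methods assumed by the helpers above):
#
#     model = cuda(MyModel())
#     loss = MyLoss()
#     optimizer = optim.Adam(model.parameters(), lr=1e-3)
#     scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.3, patience=2)
#     model = train_valid_dataloader(train_loader, valid_loader, model, loss,
#                                    optimizer, scheduler, epochs=20,
#                                    save_name='model.pt')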
| apache-2.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/matplotlib/backends/backend_gtk3cairo.py | 6 | 1874 | import backend_gtk3
import backend_cairo
from matplotlib.figure import Figure
class RendererGTK3Cairo(backend_cairo.RendererCairo):
def set_context(self, ctx):
self.gc.ctx = ctx
class FigureCanvasGTK3Cairo(backend_gtk3.FigureCanvasGTK3,
backend_cairo.FigureCanvasCairo):
def __init__(self, figure):
backend_gtk3.FigureCanvasGTK3.__init__(self, figure)
def _renderer_init(self):
"""use cairo renderer"""
self._renderer = RendererGTK3Cairo(self.figure.dpi)
def _render_figure(self, width, height):
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
def on_draw_event(self, widget, ctx):
""" GtkDrawable draw event, like expose_event in GTK 2.X
"""
        # the _need_redraw flag doesn't work. it sometimes prevents
        # the rendering and leaves the canvas blank
#if self._need_redraw:
self._renderer.set_context(ctx)
allocation = self.get_allocation()
x, y, w, h = allocation.x, allocation.y, allocation.width, allocation.height
self._render_figure(w, h)
#self._need_redraw = False
return False # finish event propagation?
class FigureManagerGTK3Cairo(backend_gtk3.FigureManagerGTK3):
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTK3Cairo(figure)
manager = FigureManagerGTK3Cairo(canvas, num)
return manager
FigureManager = FigureManagerGTK3Cairo
show = backend_gtk3.show
| mit |
spatchcock/monetary_economics_python | scripts/9_simple_model_of_boom_and_bust.py | 1 | 10415 | # -*- coding: utf-8 -*-
# This script describes the iteration of a simple economic model examining changing
# private sector spending behaviour. It is described in the accompanying iPython
# Notebook and at
#
# http://misunderheard.org/monetary_economics/2017/07/29/simple-model-of-boom-and-bust/
#
#%% Include libraries
import matplotlib.pyplot as plt
import numpy as np
# number of time steps
N = 100
# exogeneous variables
G = 20 # government spending
theta = 0.2 # tax rate
alpha_H = 0.2 # propensity to spend out of saved wealth
# endogeneous variables
Y = np.zeros(N) # income
T = np.zeros(N) # tax revenue
C = np.zeros(N) # consumption
H_h = np.zeros(N) # private savings
H_g = np.zeros(N) # government balance
#%% define propensity to consume time series
alpha_Y = np.zeros(N)
alpha_Y[0:10] = 0.9 # set the first 10 elements
alpha_Y[10:N] = 0.8 # set the remainder of the elements
print(alpha_Y[0:15])
#%% set initial conditions
Y[0] = 100
C[0] = 80
T[0] = 20
H_h[0] = 40
H_g[0] = -40
#%% run model
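# Equation (1) below follows from combining the income identity Y = C + G (all
# spending becomes income) with the consumption function (4),
# C = alpha_Y*(1 - theta)*Y + alpha_H*H_h[t-1]; solving for Y gives
# Y = (G + alpha_H*H_h[t-1]) / (1 - alpha_Y*(1 - theta)).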
for t in range(1, N):
# calculate total income for this time step (equation 1)
Y[t] = (G + alpha_H*H_h[t-1])/(1 - alpha_Y[t]*(1-theta))
# calculate the tax paid on income for this time step (3)
T[t] = theta * Y[t]
# calculate the consumption spending for this time step (4)
C[t] = alpha_Y[t]*(1 - theta)*Y[t] + alpha_H*H_h[t-1]
# calculate the new level of private savings for this time step (5)
H_h[t] = H_h[t-1] + Y[t] - T[t] - C[t]
# calculate the new level of government money balance (6)
H_g[t] = H_g[t-1] + T[t]- G
#%% plot aggregates
# initialise plot figure
fig = plt.figure(figsize=(12, 4))
# plot government spending (G) through time
gov_plot = fig.add_subplot(131, xlim=(0, N), ylim=(0, 120)) # set axis limits
gov_plot.plot(range(N), np.repeat(G,N), lw=3) # plot constant G versus time
gov_plot.grid() # add gridlines
plt.xlabel('time') # label x axis
plt.ylabel('government spending') # label y axis
# plot consumption spending (C) through time
consumption_plot = fig.add_subplot(132, xlim=(0, N), ylim=(0, 120)) # set axis limits
consumption_plot.plot(range(N), C, lw=3) # plot C versus time
consumption_plot.grid() # add gridlines
plt.xlabel('time') # label x axis
plt.ylabel('consumption')                                         # label y axis
# plot aggregate income (Y) through time
income_plot = fig.add_subplot(133, xlim=(0, N), ylim=(0, 120)) # set axis limits
income_plot.plot(range(N), Y, lw=3) # plot Y versus time
income_plot.grid() # add gridlines
plt.xlabel('time') # label x axis
plt.ylabel('income') # label y axis
plt.tight_layout() # space subplots neatly
#%% plot government
# initialise plot figure
fig = plt.figure(figsize=(8, 4))
gov_plot = fig.add_subplot(121, xlim=(0, N), ylim=(0, np.max(G)*1.5)) # set axis limits
gov_plot.plot(range(N), np.repeat(G,N), lw=3) # plot constant G versus time
gov_plot.grid() # add gridlines
plt.xlabel('time') # label x axis
plt.ylabel('government spending') # label y axis
tax_plot = fig.add_subplot(122, xlim=(0, N), ylim=(0, np.max(G)*1.5)) # set axis limits
tax_plot.plot(range(N), T, lw=3) # plot tax revenue versus time
tax_plot.grid() # add gridlines
plt.xlabel('time') # label x axis
plt.ylabel('tax revenue') # label y axis
plt.tight_layout() # space subplots neatly
#%% plot sectoral balances
# initialise plot figure
fig = plt.figure(figsize=(8, 4))
budget_plot = fig.add_subplot(121, xlim=(0, N), ylim=(-10, 10)) # set axis limits
budget_plot.plot(range(N), T-np.repeat(G,N), lw=3, label='Government') # plot gov budget versus time
budget_plot.plot(range(N), Y-T-C, lw=3, label='Private sector') # plot private budget versus time
budget_plot.grid() # add gridlines
plt.xlabel('time') # label x axis
plt.ylabel('budget position') # label y axis
plt.legend(loc='upper right')
balance_plot = fig.add_subplot(122, xlim=(0, N), ylim=(np.min(H_g), np.max(H_h))) # set axis limits
balance_plot.plot(range(N), H_g, lw=3, label='Government') # plot gov balance versus time
balance_plot.plot(range(N), H_h, lw=3, label='Private sector') # plot private balance versus time
balance_plot.grid() # add gridlines
plt.xlabel('time') # label x axis
plt.ylabel('money balance') # label y axis
plt.legend(loc='center right')
plt.tight_layout() # space subplots neatly
#%% reset propensity to consume time series
alpha_Y[0:10] = 0.9
alpha_Y[10:50] = 0.8
alpha_Y[50:N] = 0.9
#%% run model
for t in range(1, N):
# calculate total income for this time step (equation 1)
Y[t] = (G + alpha_H*H_h[t-1])/(1 - alpha_Y[t]*(1-theta))
# calculate the tax paid on income for this time step (3)
T[t] = theta * Y[t]
# calculate the consumption spending for this time step (4)
C[t] = alpha_Y[t]*(1 - theta)*Y[t] + alpha_H*H_h[t-1]
# calculate the new level of private savings for this time step (5)
H_h[t] = H_h[t-1] + Y[t] - T[t] - C[t]
# calculate the new level of government money balance (6)
H_g[t] = H_g[t-1] + T[t]- G
#%% plot aggregates
# initialise plot figure
fig = plt.figure(figsize=(12, 4))
# plot government spending (G) through time
gov_plot = fig.add_subplot(131, xlim=(0, N), ylim=(0, 130)) # set axis limits
gov_plot.plot(range(N), np.repeat(G,N), lw=3) # plot constant G versus time
gov_plot.grid() # add gridlines
plt.xlabel('time') # label x axis
plt.ylabel('government spending') # label y axis
# plot consumption spending (C) through time
consumption_plot = fig.add_subplot(132, xlim=(0, N), ylim=(0, 130)) # set axis limits
consumption_plot.plot(range(N), C, lw=3) # plot C versus time
consumption_plot.grid() # add gridlines
plt.xlabel('time') # label x axis
plt.ylabel('consumption') # label y axis
# plot aggregate income (Y) through time
income_plot = fig.add_subplot(133, xlim=(0, N), ylim=(0, 130)) # set axis limits
income_plot.plot(range(N), Y, lw=3) # plot Y versus time
income_plot.grid() # add gridlines
plt.xlabel('time') # label x axis
plt.ylabel('income') # label y axis
plt.tight_layout() # space subplots neatly
#%% plot government
# initialise plot figure
fig = plt.figure(figsize=(8, 4))
gov_plot = fig.add_subplot(121, xlim=(0, N), ylim=(0, np.max(G)*1.5)) # set axis limits
gov_plot.plot(range(N), np.repeat(G,N), lw=3) # plot constant G versus time
gov_plot.grid() # add gridlines
plt.xlabel('time') # label x axis
plt.ylabel('government spending') # label y axis
tax_plot = fig.add_subplot(122, xlim=(0, N), ylim=(0, np.max(G)*1.5)) # set axis limits
tax_plot.plot(range(N), T, lw=3) # plot tax revenue versus time
tax_plot.grid() # add gridlines
plt.xlabel('time') # label x axis
plt.ylabel('tax revenue') # label y axis
plt.tight_layout() # space subplots neatly
#%% plot sectoral balances
# initialise plot figure
fig = plt.figure(figsize=(8, 4))
budget_plot = fig.add_subplot(121, xlim=(0, N), ylim=(-10, 10)) # set axis limits
budget_plot.plot(range(N), T-np.repeat(G,N), lw=3, label='Government') # plot gov budget versus time
budget_plot.plot(range(N), Y-T-C, lw=3, label='Private sector') # plot private budget versus time
budget_plot.grid() # add gridlines
plt.xlabel('time') # label x axis
plt.ylabel('budget position') # label y axis
plt.legend(loc='upper right')
balance_plot = fig.add_subplot(122, xlim=(0, N), ylim=(np.min(H_g), np.max(H_h))) # set axis limits
balance_plot.plot(range(N), H_g, lw=3, label='Government') # plot gov balance versus time
balance_plot.plot(range(N), H_h, lw=3, label='Private sector') # plot private balance versus time
balance_plot.grid() # add gridlines
plt.xlabel('time') # label x axis
plt.ylabel('money balance') # label y axis
plt.legend(loc='center right')
plt.tight_layout() # space subplots neatly
#%%
| mit |
JosmanPS/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
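    # A p-norm-style distance (without absolute values); used below to exercise
    # the user-defined "pyfunc" metric.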
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
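    # haversine_slow is the textbook haversine formula: the great-circle distance
    # on the unit sphere between (lat, lon) points given in radians.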
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
| bsd-3-clause |
pramodh-bn/learn-data-edx | Week 7/a8.py | 1 | 5521 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import lines as mpl_lines
def getRandomLine():
return zip(np.random.uniform(-1,1.00,2),np.random.uniform(-1,1.00,2))
def getPoints(numberOfPoints):
pointList = zip(np.random.uniform(-1,1.00,numberOfPoints),np.random.uniform(-1,1.00,numberOfPoints))
return pointList
def isLeft(a, b, c):
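    # Sign of the 2-D cross product (b - a) x (c - a): +1 when c lies to the left
    # of the directed line a->b, -1 otherwise.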
return 1 if ((b[0] - a[0])*(c[1] - a[1]) - (b[1] - a[1])*(c[0] - a[0])) > 0 else -1;
def sign(x):
return 1 if x > 0 else -1
def sign1(x):
return -1 if x > 0 else 1
def doPLA(sample):
w = np.array([0,0,0])
iteration = 0
it = 0
while True:#(it < 10):
iteration = iteration + 1
it = it + 1
mismatch = list()
for i in sample:
#print("point in question ", i , " weight ", w)
yy = w[0] + w[1] * i[0] + w[2] * i[1]
#print("this is after applying weight to a point ",yy)
point = [i[0], i[1], sign(yy)]
if any(np.equal(sample, point).all(1)):
#print "point not in sample"
if(point[2] == -1):
mismatch.append((1, (i[0]), (i[1])))
else:
mismatch.append((-1, -(i[0]), -(i[1])))
#print " length ", len(mismatch), " mismatch list ",mismatch
if(len(mismatch) > 0):
#find a random point and update w
choiceIndex = np.random.randint(0, len(mismatch))
choice = mismatch[choiceIndex]
#print("choice ", choice)
w = w + choice
#print "new weight ", w
else:
break
#print("this is the iteration ", iteration)
#print("this is the weight ", w)
#montelist = [monetcarlo((x1,y1),(x2,y2),w,10000) for i in range(5)]
#print("Montelist " , montelist)
#monteavg = sum([i for i in montelist])/10
return w, iteration
def getMisMatches1(data, weights):
#print data
list1 = np.empty(len(data))
list1.fill(weights[0])
results = list1+ weights[1]*data[:,0]+weights[2]*data[:,1]
print results
return float(len(data) - np.sum(np.sign(results) == np.sign(data[:,2])))/len(data)
def getMisMatches(data, weights):
#print data
list1 = np.empty(len(data))
list1.fill(weights[0])
results = list1+ weights[1]*data[:,0]+weights[2]*data[:,1]
results = -1 * results
return float(len(data) - np.sum(np.sign(results) == np.sign(data[:,2])))/len(data)
def doMonteCarloNP(pointa, pointb, weights, nopoint):
#print "weights ", weight
points = [(np.random.uniform(-1,1), np.random.uniform(-1,1)) for i in range(nopoint)]
#print points
dataset_Monte = np.array([(i[0],i[1], isLeft(pointa,pointb,i)) for i in points])
#print dataset_Monte
return getMisMatches(dataset_Monte, weights)
def doMonteCarlo(pointa, pointb, weight, nopoint):
#print "weights ", weight
points = [(np.random.uniform(-1,1), np.random.uniform(-1,1)) for i in range(nopoint)]
dataset_D = [(i[0],i[1], isLeft(pointa,pointb,i)) for i in points]
dataset = np.array(dataset_D)
mismatches = 0
datasetList = []
for i in dataset_D:
yy = weight[0] + weight[1] * i[0] + weight[2] * i[1]
datasetList.append((i[0],i[1], sign1(yy)))
if(sign1(yy) != i[2]):
mismatches = mismatches + 1
#print("mismatches ", mismatches)
#plotMonteData(datasetList, ax)
#xpos = [(i[0], i[1]) for i in dataset_D if i[2] == 1]
#xneg = [(i[0], i[1]) for i in dataset_D if i[2] == -1]
xpos = dataset[dataset[:,2] == 1]
xpos1 = np.array([(i[0], i[1], 1) for i in dataset if i[2] == 1])
print len(xpos), len(xpos1)
xneg = dataset[dataset[:,2] == -1]
#plt.gca().plot(zip(*xpos)[0], zip(*xpos)[1], "+")
#plt.gca().plot(zip(*xneg)[0], zip(*xneg)[1], "*")
#plt.plot(xpos[:,0], xpos[:,1], "+")
#plt.plot(xneg[:,0], xneg[:,1], "*")
#plt.show()
return float(mismatches)/nopoint
def plotData(sample, line, w):
xpos = sample[sample[:,2] == 1]
xneg = sample[sample[:,2] == -1]
plt.plot(xpos[:,0], xpos[:,1], "x")
plt.plot(xneg[:,0], xneg[:,1], "o")
slope = (line[1][1] - line[0][1])/(line[1][0] - line[0][0])
intercept = line[0][1] - slope*line[0][0]
# update the points to be on the axes limits
x = plt.gca().get_xlim()
#y = plt.gca().get_ylim()
data_y = [x[0]*slope+intercept, x[1]*slope+intercept]
line = mpl_lines.Line2D(x, data_y, color='red')
plaPoint1 = (0.1,-((w[1]/w[2])*0.1) + (-(w[0]/w[2])))
plaPoint2 = (0.8, -((w[1]/w[2])*0.8) + (-(w[0]/w[2])))
slopePLA = (plaPoint2[1] - plaPoint1[1])/(plaPoint2[0] - plaPoint1[0])
interceptPLA = plaPoint1[1] - slope*plaPoint1[0]
xPLA = plt.gca().get_xlim()
data_yPLA = [xPLA[0]*slopePLA+interceptPLA, xPLA[1]*slopePLA+interceptPLA]
linePLA = mpl_lines.Line2D(xPLA, data_yPLA, color='blue')
plt.gca().add_line(line)
plt.gca().add_line(linePLA)
#plt.show()
if __name__ == "__main__":
avgofavgiters = list()
avgofavgprob = list()
nopoints = 300
montel = list()
iteravg = list()
for k in range(100):
cluster = getPoints(nopoints)
line = getRandomLine()
sample = np.array([(i[0], i[1], isLeft(line[0], line[1], i)) for i in cluster])
#plotData(sample, line)
w, it = doPLA(sample)
#print(getMisMatches(sample, w))
#plotData(sample, line, w)
montelist = [ doMonteCarloNP(line[0], line[1], w, 100) for i in range(100)]
#print sum(montelist)/len(montelist)
montel.append(sum(montelist)/len(montelist))
iteravg.append(it)
print sum(montel)/len(montel), sum(iteravg)/len(iteravg)
| unlicense |
lbdreyer/iris | docs/iris/gallery_code/meteorology/plot_COP_1d.py | 4 | 3953 | """
Global Average Annual Temperature Plot
======================================
Produces a time-series plot of North American temperature forecasts for 2
different emission scenarios. Constraining data to a limited spatial area also
features in this example.
The data used comes from the HadGEM2-AO model simulations for the A1B and E1
scenarios, both of which were derived using the IMAGE Integrated Assessment
Model (Johns et al. 2011; Lowe et al. 2009).
References
----------
Johns T.C., et al. (2011) Climate change under aggressive mitigation: the
ENSEMBLES multi-model experiment. Climate Dynamics, Vol 37, No. 9-10,
doi:10.1007/s00382-011-1005-5.
Lowe J.A., C.D. Hewitt, D.P. Van Vuuren, T.C. Johns, E. Stehfest, J-F.
Royer, and P. van der Linden, 2009. New Study For Climate Modeling,
Analyses, and Scenarios. Eos Trans. AGU, Vol 90, No. 21,
doi:10.1029/2009EO210001.
.. seealso::
Further details on the aggregation functionality being used in this example
can be found in :ref:`cube-statistics`.
"""
import matplotlib.pyplot as plt
import numpy as np
import iris
import iris.analysis.cartography
import iris.plot as iplt
import iris.quickplot as qplt
def main():
# Load data into three Cubes, one for each set of NetCDF files.
e1 = iris.load_cube(iris.sample_data_path("E1_north_america.nc"))
a1b = iris.load_cube(iris.sample_data_path("A1B_north_america.nc"))
# load in the global pre-industrial mean temperature, and limit the domain
# to the same North American region that e1 and a1b are at.
north_america = iris.Constraint(
longitude=lambda v: 225 <= v <= 315, latitude=lambda v: 15 <= v <= 60
)
pre_industrial = iris.load_cube(
iris.sample_data_path("pre-industrial.pp"), north_america
)
# Generate area-weights array. As e1 and a1b are on the same grid we can
# do this just once and re-use. This method requires bounds on lat/lon
# coords, so let's add some in sensible locations using the "guess_bounds"
# method.
e1.coord("latitude").guess_bounds()
e1.coord("longitude").guess_bounds()
e1_grid_areas = iris.analysis.cartography.area_weights(e1)
pre_industrial.coord("latitude").guess_bounds()
pre_industrial.coord("longitude").guess_bounds()
pre_grid_areas = iris.analysis.cartography.area_weights(pre_industrial)
# Perform the area-weighted mean for each of the datasets using the
# computed grid-box areas.
pre_industrial_mean = pre_industrial.collapsed(
["latitude", "longitude"], iris.analysis.MEAN, weights=pre_grid_areas
)
e1_mean = e1.collapsed(
["latitude", "longitude"], iris.analysis.MEAN, weights=e1_grid_areas
)
a1b_mean = a1b.collapsed(
["latitude", "longitude"], iris.analysis.MEAN, weights=e1_grid_areas
)
# Plot the datasets
qplt.plot(e1_mean, label="E1 scenario", lw=1.5, color="blue")
qplt.plot(a1b_mean, label="A1B-Image scenario", lw=1.5, color="red")
# Draw a horizontal line showing the pre-industrial mean
plt.axhline(
y=pre_industrial_mean.data,
color="gray",
linestyle="dashed",
label="pre-industrial",
lw=1.5,
)
# Constrain the period 1860-1999 and extract the observed data from a1b
constraint = iris.Constraint(
time=lambda cell: 1860 <= cell.point.year <= 1999
)
observed = a1b_mean.extract(constraint)
# Assert that this data set is the same as the e1 scenario:
# they share data up to the 1999 cut off.
assert np.all(np.isclose(observed.data, e1_mean.extract(constraint).data))
# Plot the observed data
qplt.plot(observed, label="observed", color="black", lw=1.5)
# Add a legend and title
plt.legend(loc="upper left")
plt.title("North American mean air temperature", fontsize=18)
plt.xlabel("Time / year")
plt.grid()
iplt.show()
if __name__ == "__main__":
main()
| lgpl-3.0 |
hainm/scikit-learn | examples/model_selection/plot_roc_crossval.py | 247 | 3253 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.cross_validation.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
thorwhalen/ut | stats/classification/bin/iterpol.py | 1 | 2902 | __author__ = 'thor'
from numpy import *
import numpy as np
import time
import sklearn as sk
import sklearn.cluster    # ensure sk.cluster is importable below
import sklearn.neighbors  # ensure sk.neighbors is importable below
import scipy.interpolate as interpolate
from ut.stats.classification.bin.base import BinaryClassifierBase2D
from ut.stats.util import binomial_probs_to_multinomial_probs
class BinaryClassificationByInterpolatedProbabilities(BinaryClassifierBase2D):
"""
This is a BinaryClassifierBase2D that estimates probabilities by interpolation.
The fit function finds n_clusters clusters of the x data and assigns to each cluster center the mean of the ys of
the n_neighbors nearest neighbors of the center.
The ys of every other point of the x space are then estimated by interpolating over these clusters centers.
"""
def __init__(self, n_clusters=500, n_neighbors=3000, interpolator='cubic', **kwargs):
super(BinaryClassificationByInterpolatedProbabilities, self).__init__(**kwargs)
self.n_clusters = n_clusters
self.n_neighbors = n_neighbors
self.interpolator = interpolator
self.clus = []
self.cluster_x = None
self.cluster_y = None
self.iterpol = None
self.nnb_iterpol = None
def fit(self, x, y):
t0 = time.time()
assert set(y.flatten()) == set([0, 1]), "y data (target data) needs to have only 0s and 1s"
# determine the clusters
self.clus = sk.cluster.MiniBatchKMeans(n_clusters=self.n_clusters)
self.clus.fit(x)
# determine the nearest neighbor for each data point
nns = sk.neighbors.NearestNeighbors(n_neighbors=self.n_neighbors)
nns.fit(x)
neighbor_dist, neighbor_idx = nns.kneighbors(self.clus.cluster_centers_, n_neighbors=self.n_neighbors)
# compute the cluster means
self.cluster_x = self.clus.cluster_centers_
self.cluster_y = array([nanmean(y[neighbor_idx[i, :]]) for i in range(shape(self.cluster_x)[0])])
# make the interpolator
if self.interpolator == 'linear':
self.iterpol = interpolate.LinearNDInterpolator(self.cluster_x, self.cluster_y, fill_value=nan)
else:
self.iterpol = interpolate.CloughTocher2DInterpolator(self.cluster_x, self.cluster_y, fill_value=nan)
self.nnb_iterpol = interpolate.NearestNDInterpolator(self.cluster_x, self.cluster_y)
print("fit elapsed time: %.02f minutes" % ((time.time() - t0) / 60.))
def predict_proba(self, x):
iterpolations = self.iterpol(x)
lidx = np.isnan(iterpolations)
iterpolations[lidx] = self.nnb_iterpol(x)[lidx]
iterpolations[iterpolations < 0] = 0.0 # cubic interpolation might create negatives, so we set these to 0
return binomial_probs_to_multinomial_probs(iterpolations)
# if only_event_probs:
# return iterpolations
# else:
# return binomial_probs_to_multinomial_probs(iterpolations)
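# A minimal usage sketch (hypothetical arrays; x_* are 2-D feature arrays of
# shape (n_samples, 2) and y_train contains only 0s and 1s, as fit() requires):
#
#     clf = BinaryClassificationByInterpolatedProbabilities(n_clusters=200, n_neighbors=500)
#     clf.fit(x_train, y_train)
#     probs = clf.predict_proba(x_test)   # (n_samples, 2) class probabilities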
| mit |
xuewei4d/scikit-learn | sklearn/neighbors/_nca.py | 10 | 20696 | # coding: utf-8
"""
Neighborhood Component Analysis
"""
# Authors: William de Vazelhes <[email protected]>
# John Chiotellis <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from warnings import warn
import numpy as np
import sys
import time
import numbers
from scipy.optimize import minimize
from ..utils.extmath import softmax
from ..metrics import pairwise_distances
from ..base import BaseEstimator, TransformerMixin
from ..preprocessing import LabelEncoder
from ..decomposition import PCA
from ..utils.multiclass import check_classification_targets
from ..utils.random import check_random_state
from ..utils.validation import check_is_fitted, check_array, check_scalar
from ..utils.validation import _deprecate_positional_args
from ..exceptions import ConvergenceWarning
class NeighborhoodComponentsAnalysis(TransformerMixin, BaseEstimator):
"""Neighborhood Components Analysis
Neighborhood Component Analysis (NCA) is a machine learning algorithm for
metric learning. It learns a linear transformation in a supervised fashion
to improve the classification accuracy of a stochastic nearest neighbors
rule in the transformed space.
Read more in the :ref:`User Guide <nca>`.
Parameters
----------
n_components : int, default=None
Preferred dimensionality of the projected space.
If None it will be set to ``n_features``.
init : {'auto', 'pca', 'lda', 'identity', 'random'} or ndarray of shape \
(n_features_a, n_features_b), default='auto'
Initialization of the linear transformation. Possible options are
'auto', 'pca', 'lda', 'identity', 'random', and a numpy array of shape
(n_features_a, n_features_b).
'auto'
Depending on ``n_components``, the most reasonable initialization
will be chosen. If ``n_components <= n_classes`` we use 'lda', as
it uses labels information. If not, but
``n_components < min(n_features, n_samples)``, we use 'pca', as
it projects data in meaningful directions (those of higher
variance). Otherwise, we just use 'identity'.
'pca'
``n_components`` principal components of the inputs passed
to :meth:`fit` will be used to initialize the transformation.
(See :class:`~sklearn.decomposition.PCA`)
'lda'
``min(n_components, n_classes)`` most discriminative
components of the inputs passed to :meth:`fit` will be used to
initialize the transformation. (If ``n_components > n_classes``,
the rest of the components will be zero.) (See
:class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis`)
'identity'
If ``n_components`` is strictly smaller than the
dimensionality of the inputs passed to :meth:`fit`, the identity
matrix will be truncated to the first ``n_components`` rows.
'random'
The initial transformation will be a random array of shape
`(n_components, n_features)`. Each value is sampled from the
standard normal distribution.
numpy array
n_features_b must match the dimensionality of the inputs passed to
:meth:`fit` and n_features_a must be less than or equal to that.
If ``n_components`` is not None, n_features_a must match it.
warm_start : bool, default=False
If True and :meth:`fit` has been called before, the solution of the
previous call to :meth:`fit` is used as the initial linear
transformation (``n_components`` and ``init`` will be ignored).
max_iter : int, default=50
Maximum number of iterations in the optimization.
tol : float, default=1e-5
Convergence tolerance for the optimization.
callback : callable, default=None
If not None, this function is called after every iteration of the
optimizer, taking as arguments the current solution (flattened
transformation matrix) and the number of iterations. This might be
useful in case one wants to examine or store the transformation
found after each iteration.
verbose : int, default=0
If 0, no progress messages will be printed.
If 1, progress messages will be printed to stdout.
If > 1, progress messages will be printed and the ``disp``
parameter of :func:`scipy.optimize.minimize` will be set to
``verbose - 2``.
random_state : int or numpy.RandomState, default=None
A pseudo random number generator object or a seed for it if int. If
``init='random'``, ``random_state`` is used to initialize the random
transformation. If ``init='pca'``, ``random_state`` is passed as an
argument to PCA when initializing the transformation. Pass an int
for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
The linear transformation learned during fitting.
n_iter_ : int
Counts the number of iterations performed by the optimizer.
random_state_ : numpy.RandomState
Pseudo random number generator object used during initialization.
Examples
--------
>>> from sklearn.neighbors import NeighborhoodComponentsAnalysis
>>> from sklearn.neighbors import KNeighborsClassifier
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import train_test_split
>>> X, y = load_iris(return_X_y=True)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... stratify=y, test_size=0.7, random_state=42)
>>> nca = NeighborhoodComponentsAnalysis(random_state=42)
>>> nca.fit(X_train, y_train)
NeighborhoodComponentsAnalysis(...)
>>> knn = KNeighborsClassifier(n_neighbors=3)
>>> knn.fit(X_train, y_train)
KNeighborsClassifier(...)
>>> print(knn.score(X_test, y_test))
0.933333...
>>> knn.fit(nca.transform(X_train), y_train)
KNeighborsClassifier(...)
>>> print(knn.score(nca.transform(X_test), y_test))
0.961904...
References
----------
.. [1] J. Goldberger, G. Hinton, S. Roweis, R. Salakhutdinov.
"Neighbourhood Components Analysis". Advances in Neural Information
Processing Systems. 17, 513-520, 2005.
http://www.cs.nyu.edu/~roweis/papers/ncanips.pdf
.. [2] Wikipedia entry on Neighborhood Components Analysis
https://en.wikipedia.org/wiki/Neighbourhood_components_analysis
"""
@_deprecate_positional_args
def __init__(self, n_components=None, *, init='auto', warm_start=False,
max_iter=50, tol=1e-5, callback=None, verbose=0,
random_state=None):
self.n_components = n_components
self.init = init
self.warm_start = warm_start
self.max_iter = max_iter
self.tol = tol
self.callback = callback
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training samples.
y : array-like of shape (n_samples,)
The corresponding training labels.
Returns
-------
self : object
returns a trained NeighborhoodComponentsAnalysis model.
"""
# Verify inputs X and y and NCA parameters, and transform a copy if
# needed
X, y, init = self._validate_params(X, y)
# Initialize the random generator
self.random_state_ = check_random_state(self.random_state)
# Measure the total training time
t_train = time.time()
# Compute a mask that stays fixed during optimization:
same_class_mask = y[:, np.newaxis] == y[np.newaxis, :]
# (n_samples, n_samples)
# Initialize the transformation
transformation = self._initialize(X, y, init)
# Create a dictionary of parameters to be passed to the optimizer
disp = self.verbose - 2 if self.verbose > 1 else -1
optimizer_params = {'method': 'L-BFGS-B',
'fun': self._loss_grad_lbfgs,
'args': (X, same_class_mask, -1.0),
'jac': True,
'x0': transformation,
'tol': self.tol,
'options': dict(maxiter=self.max_iter, disp=disp),
'callback': self._callback
}
# Call the optimizer
self.n_iter_ = 0
opt_result = minimize(**optimizer_params)
# Reshape the solution found by the optimizer
self.components_ = opt_result.x.reshape(-1, X.shape[1])
# Stop timer
t_train = time.time() - t_train
if self.verbose:
cls_name = self.__class__.__name__
# Warn the user if the algorithm did not converge
if not opt_result.success:
warn('[{}] NCA did not converge: {}'.format(
cls_name, opt_result.message),
ConvergenceWarning)
print('[{}] Training took {:8.2f}s.'.format(cls_name, t_train))
return self
def transform(self, X):
"""Applies the learned transformation to the given data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data samples.
Returns
-------
        X_embedded : ndarray of shape (n_samples, n_components)
The data samples transformed.
Raises
------
NotFittedError
If :meth:`fit` has not been called before.
"""
check_is_fitted(self)
X = check_array(X)
return np.dot(X, self.components_.T)
def _validate_params(self, X, y):
"""Validate parameters as soon as :meth:`fit` is called.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training samples.
y : array-like of shape (n_samples,)
The corresponding training labels.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The validated training samples.
y : ndarray of shape (n_samples,)
The validated training labels, encoded to be integers in
the range(0, n_classes).
init : str or ndarray of shape (n_features_a, n_features_b)
The validated initialization of the linear transformation.
Raises
        ------
TypeError
If a parameter is not an instance of the desired type.
ValueError
If a parameter's value violates its legal value range or if the
combination of two or more given parameters is incompatible.
"""
# Validate the inputs X and y, and converts y to numerical classes.
X, y = self._validate_data(X, y, ensure_min_samples=2)
check_classification_targets(y)
y = LabelEncoder().fit_transform(y)
# Check the preferred dimensionality of the projected space
if self.n_components is not None:
check_scalar(
self.n_components, 'n_components', numbers.Integral, min_val=1)
if self.n_components > X.shape[1]:
raise ValueError('The preferred dimensionality of the '
'projected space `n_components` ({}) cannot '
'be greater than the given data '
'dimensionality ({})!'
.format(self.n_components, X.shape[1]))
# If warm_start is enabled, check that the inputs are consistent
check_scalar(self.warm_start, 'warm_start', bool)
if self.warm_start and hasattr(self, 'components_'):
if self.components_.shape[1] != X.shape[1]:
raise ValueError('The new inputs dimensionality ({}) does not '
'match the input dimensionality of the '
'previously learned transformation ({}).'
.format(X.shape[1],
self.components_.shape[1]))
check_scalar(self.max_iter, 'max_iter', numbers.Integral, min_val=1)
check_scalar(self.tol, 'tol', numbers.Real, min_val=0.)
check_scalar(self.verbose, 'verbose', numbers.Integral, min_val=0)
if self.callback is not None:
if not callable(self.callback):
raise ValueError('`callback` is not callable.')
# Check how the linear transformation should be initialized
init = self.init
if isinstance(init, np.ndarray):
init = check_array(init)
# Assert that init.shape[1] = X.shape[1]
if init.shape[1] != X.shape[1]:
raise ValueError(
'The input dimensionality ({}) of the given '
'linear transformation `init` must match the '
'dimensionality of the given inputs `X` ({}).'
.format(init.shape[1], X.shape[1]))
# Assert that init.shape[0] <= init.shape[1]
if init.shape[0] > init.shape[1]:
raise ValueError(
'The output dimensionality ({}) of the given '
'linear transformation `init` cannot be '
'greater than its input dimensionality ({}).'
.format(init.shape[0], init.shape[1]))
if self.n_components is not None:
# Assert that self.n_components = init.shape[0]
if self.n_components != init.shape[0]:
raise ValueError('The preferred dimensionality of the '
'projected space `n_components` ({}) does'
' not match the output dimensionality of '
'the given linear transformation '
'`init` ({})!'
.format(self.n_components,
init.shape[0]))
elif init in ['auto', 'pca', 'lda', 'identity', 'random']:
pass
else:
raise ValueError(
"`init` must be 'auto', 'pca', 'lda', 'identity', 'random' "
"or a numpy array of shape (n_components, n_features).")
return X, y, init
def _initialize(self, X, y, init):
"""Initialize the transformation.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training samples.
y : array-like of shape (n_samples,)
The training labels.
init : str or ndarray of shape (n_features_a, n_features_b)
The validated initialization of the linear transformation.
Returns
-------
transformation : ndarray of shape (n_components, n_features)
The initialized linear transformation.
"""
transformation = init
if self.warm_start and hasattr(self, 'components_'):
transformation = self.components_
elif isinstance(init, np.ndarray):
pass
else:
n_samples, n_features = X.shape
n_components = self.n_components or n_features
if init == 'auto':
n_classes = len(np.unique(y))
if n_components <= min(n_features, n_classes - 1):
init = 'lda'
elif n_components < min(n_features, n_samples):
init = 'pca'
else:
init = 'identity'
if init == 'identity':
transformation = np.eye(n_components, X.shape[1])
elif init == 'random':
transformation = self.random_state_.randn(n_components,
X.shape[1])
elif init in {'pca', 'lda'}:
init_time = time.time()
if init == 'pca':
pca = PCA(n_components=n_components,
random_state=self.random_state_)
if self.verbose:
print('Finding principal components... ', end='')
sys.stdout.flush()
pca.fit(X)
transformation = pca.components_
elif init == 'lda':
from ..discriminant_analysis import (
LinearDiscriminantAnalysis)
lda = LinearDiscriminantAnalysis(n_components=n_components)
if self.verbose:
print('Finding most discriminative components... ',
end='')
sys.stdout.flush()
lda.fit(X, y)
transformation = lda.scalings_.T[:n_components]
if self.verbose:
print('done in {:5.2f}s'.format(time.time() - init_time))
return transformation
def _callback(self, transformation):
"""Called after each iteration of the optimizer.
Parameters
----------
transformation : ndarray of shape (n_components * n_features,)
The solution computed by the optimizer in this iteration.
"""
if self.callback is not None:
self.callback(transformation, self.n_iter_)
self.n_iter_ += 1
def _loss_grad_lbfgs(self, transformation, X, same_class_mask, sign=1.0):
"""Compute the loss and the loss gradient w.r.t. ``transformation``.
Parameters
----------
transformation : ndarray of shape (n_components * n_features,)
The raveled linear transformation on which to compute loss and
evaluate gradient.
X : ndarray of shape (n_samples, n_features)
The training samples.
same_class_mask : ndarray of shape (n_samples, n_samples)
A mask where ``mask[i, j] == 1`` if ``X[i]`` and ``X[j]`` belong
to the same class, and ``0`` otherwise.
Returns
-------
loss : float
The loss computed for the given transformation.
gradient : ndarray of shape (n_components * n_features,)
The new (flattened) gradient of the loss.
"""
if self.n_iter_ == 0:
self.n_iter_ += 1
if self.verbose:
header_fields = ['Iteration', 'Objective Value', 'Time(s)']
header_fmt = '{:>10} {:>20} {:>10}'
header = header_fmt.format(*header_fields)
cls_name = self.__class__.__name__
print('[{}]'.format(cls_name))
print('[{}] {}\n[{}] {}'.format(cls_name, header,
cls_name, '-' * len(header)))
t_funcall = time.time()
transformation = transformation.reshape(-1, X.shape[1])
X_embedded = np.dot(X, transformation.T) # (n_samples, n_components)
# Compute softmax distances
p_ij = pairwise_distances(X_embedded, squared=True)
np.fill_diagonal(p_ij, np.inf)
p_ij = softmax(-p_ij) # (n_samples, n_samples)
# Compute loss
masked_p_ij = p_ij * same_class_mask
p = np.sum(masked_p_ij, axis=1, keepdims=True) # (n_samples, 1)
loss = np.sum(p)
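        # `loss` is the expected number of correctly classified samples under
        # the stochastic neighbor rule; `sign` (set to -1.0 in `fit`) turns the
        # maximization into a minimization problem for L-BFGS.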
# Compute gradient of loss w.r.t. `transform`
weighted_p_ij = masked_p_ij - p_ij * p
weighted_p_ij_sym = weighted_p_ij + weighted_p_ij.T
np.fill_diagonal(weighted_p_ij_sym, -weighted_p_ij.sum(axis=0))
gradient = 2 * X_embedded.T.dot(weighted_p_ij_sym).dot(X)
# time complexity of the gradient: O(n_components x n_samples x (
# n_samples + n_features))
if self.verbose:
t_funcall = time.time() - t_funcall
values_fmt = '[{}] {:>10} {:>20.6e} {:>10.2f}'
print(values_fmt.format(self.__class__.__name__, self.n_iter_,
loss, t_funcall))
sys.stdout.flush()
return sign * loss, sign * gradient.ravel()
def _more_tags(self):
return {'requires_y': True}
| bsd-3-clause |
ajdawson/eofs | doc/conf.py | 1 | 9234 | # -*- coding: utf-8 -*-
#
# eofs documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 5 15:47:55 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import time
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.append(os.path.abspath('sphinxext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.extlinks',
'matplotlib.sphinxext.plot_directive',]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'eofs'
copyright = '2013-{} Andrew Dawson'.format(time.localtime().tm_year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import eofs
version = eofs.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'python'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- extlinks configuration ----------------------------------------------------
# Allow e.g. :issue:`42` and :pr:`42` roles:
extlinks = {'issue': ('https://github.com/ajdawson/eofs/issues/%s', '#'),
'pr': ('https://github.com/ajdawson/eofs/pull/%s', '#')}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx13'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'**': ['sidebar_toc.html',
'relations.html',
'sourcelink.html',
'searchbox.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {'index': 'index.html'}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'eofsdoc'
# Options for intersphinx.
intersphinx_mapping = {
'eof2': ('http://ajdawson.github.com/eof2', None),
'iris': ('http://scitools.org.uk/iris/docs/latest', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'xarray': ('http://xarray.pydata.org/en/stable', None),
'dask': ('https://docs.dask.org/en/latest', None),
}
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '11pt',
# Additional stuff for the LaTeX preamble.
'preamble': """\\usepackage{amssymb}
\\usepackage{amsmath}""",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('userguide/index', 'userguide.tex', 'eofs User Guide', 'Andrew Dawson',
'manual'),
('examples/index', 'examples.tex', 'eofs Examples', 'Andrew Dawson',
'manual'),
('api/index', 'api.tex', 'eofs API Reference', 'Andrew Dawson',
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'eofs', 'eofs Documentation',
['Andrew Dawson'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'eofs', 'eofs Documentation',
'Andrew Dawson', 'eofs', 'EOF analysis in Python.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Autodoc settings -- #
autoclass_content = 'both'
autodoc_member_order = 'bysource'
autodoc_docstring_signature = True
autosummary_generate = True
| gpl-3.0 |
4thgen/DCGAN-CIFAR10 | utils.py | 1 | 7693 | """
Most codes from https://github.com/carpedm20/DCGAN-tensorflow
"""
from __future__ import division
import math
import random
import pprint
import scipy.misc
import numpy as np
from time import gmtime, strftime
from six.moves import xrange
import os, gzip
import _pickle
import tensorflow as tf
import tensorflow.contrib.slim as slim
import matplotlib.pyplot as plt
def unpickle(relpath):
print(relpath)
with open(relpath, 'rb') as fp:
d = _pickle.load(fp,encoding='bytes')
return d
def prepare_input(data=None, labels=None):
image_height = 32
image_width = 32
image_depth = 3
assert(data.shape[1] == image_height * image_width * image_depth)
assert(data.shape[0] == labels.shape[0])
    # do mean normalization across all samples
mu = np.mean(data, axis=0)
mu = mu.reshape(1,-1)
sigma = np.std(data, axis=0)
sigma = sigma.reshape(1, -1)
data = data - mu
data = data / sigma
is_nan = np.isnan(data)
is_inf = np.isinf(data)
if np.any(is_nan) or np.any(is_inf):
print('data is not well-formed : is_nan {n}, is_inf: {i}'.format(n= np.any(is_nan), i=np.any(is_inf)))
#data is transformed from (no_of_samples, 3072) to (no_of_samples , image_height, image_width, image_depth)
    # make sure the type of the data is np.float32
data = data.reshape([-1,image_depth, image_height, image_width])
data = data.transpose([0, 2, 3, 1])
data = data.astype(np.float32)
#print("prepare_input: ",len(data),len(labels))
return data, labels
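# Shape sketch for prepare_input (an illustrative note, not original code):
# a CIFAR-10 batch arrives as (10000, 3072); after per-feature standardisation
# it is reshaped to (10000, 3, 32, 32) and transposed to (10000, 32, 32, 3),
# i.e. channels-last float32 images ready for the network.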
def read_cifar10(filename): # queue one element
class CIFAR10Record(object):
pass
result = CIFAR10Record()
label_bytes = 1 # 2 for CIFAR-100
result.height = 32
result.width = 32
result.depth = 3
data = unpickle(filename)
#print(data.keys())
#value = np.asarray(data[b'data']).astype(np.float32)
#labels = np.asarray(data[b'labels']).astype(np.int32)
value = np.asarray(data[b'data']).astype(np.float32)
labels = np.asarray(data[b'labels']).astype(np.int32)
#print("read_cifar10: ",len(value),len(labels))
return prepare_input(value,labels)
#return prepare_input(value.astype(np.float32),labels.astype(np.int32))
def load_cifar10():
data_dir = "\\TensorflowData\\cifar-10-batches-py"
filenames = [os.path.join(data_dir, 'data_batch_%d' % i) for i in xrange(1, 6)]
#filenames = ['data_batch_%d.bin' % i for i in xrange(1, 6)]
filenames.append(os.path.join(data_dir, 'test_batch'))
for idx , filename in enumerate(filenames):
temp_X, temp_y = read_cifar10(filename)
print("load_cifar10 for temp shape:",temp_X.shape,temp_y.shape)
if idx == 0:
dataX = temp_X
labely = temp_y
else:
dataX = np.append(dataX,temp_X)
labely = np.append(labely,temp_y)
dataX = dataX.reshape([-1,32, 32, 3])
print("load_cifar10 for len:",len(dataX),len(labely))
print("load_cifar10 for shape:",dataX.shape,labely.shape)
seed = 547
np.random.seed(seed)
np.random.shuffle(dataX)
np.random.seed(seed)
np.random.shuffle(labely)
y_vec = np.zeros((len(labely), 10), dtype=np.float)
for i, label in enumerate(labely):
y_vec[i, labely[i]] = 1.0
return dataX / 255., y_vec
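# Minimal usage sketch (assumes the six CIFAR-10 python batches exist under the
# hard-coded data_dir above; shapes follow from the code rather than from a run):
# >>> X, y = load_cifar10()
# >>> X.shape    # (60000, 32, 32, 3), pixel values scaled to [0, 1]
# >>> y.shape    # (60000, 10), one-hot labels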
def load_mnist(dataset_name):
data_dir = os.path.join("\\TensorflowData\\", dataset_name) # customizing edit
def extract_data(filename, num_data, head_size, data_size):
with gzip.open(filename) as bytestream:
bytestream.read(head_size)
buf = bytestream.read(data_size * num_data)
data = np.frombuffer(buf, dtype=np.uint8).astype(np.float)
return data
data = extract_data(data_dir + '\\train-images-idx3-ubyte.gz', 60000, 16, 28 * 28)
trX = data.reshape((60000, 28, 28, 1))
data = extract_data(data_dir + '\\train-labels-idx1-ubyte.gz', 60000, 8, 1)
trY = data.reshape((60000))
data = extract_data(data_dir + '\\t10k-images-idx3-ubyte.gz', 10000, 16, 28 * 28)
teX = data.reshape((10000, 28, 28, 1))
data = extract_data(data_dir + '\\t10k-labels-idx1-ubyte.gz', 10000, 8, 1)
teY = data.reshape((10000))
trY = np.asarray(trY)
teY = np.asarray(teY)
X = np.concatenate((trX, teX), axis=0)
y = np.concatenate((trY, teY), axis=0).astype(np.int)
seed = 547
np.random.seed(seed)
np.random.shuffle(X)
np.random.seed(seed)
np.random.shuffle(y)
y_vec = np.zeros((len(y), 10), dtype=np.float)
for i, label in enumerate(y):
y_vec[i, y[i]] = 1.0
return X / 255., y_vec
def show_all_variables():
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
def get_image(image_path, input_height, input_width, resize_height=64, resize_width=64, crop=True, grayscale=False):
image = imread(image_path, grayscale)
return transform(image, input_height, input_width, resize_height, resize_width, crop)
def save_images(images, size, image_path):
return imsave(inverse_transform(images), size, image_path)
def imread(path, grayscale = False):
if (grayscale):
return scipy.misc.imread(path, flatten = True).astype(np.float)
else:
return scipy.misc.imread(path).astype(np.float)
def merge_images(images, size):
return inverse_transform(images)
def merge(images, size):
h, w = images.shape[1], images.shape[2]
if (images.shape[3] in (3,4)):
c = images.shape[3]
img = np.zeros((h * size[0], w * size[1], c)) # 28 * 8, 28 * 8, c
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w, :] = image
return img
elif images.shape[3]==1:
img = np.zeros((h * size[0], w * size[1])) # 28 * 8, 28 * 8
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w] = image[:,:,0]
return img
else:
raise ValueError('in merge(images,size) images parameter ''must have dimensions: HxW or HxWx3 or HxWx4')
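# Worked example for merge() with hypothetical numbers: given 64 grayscale
# samples of shape (64, 28, 28, 1) and size=(8, 8), the output is a single
# (8*28, 8*28) = (224, 224) array; sample idx is placed at grid row idx // 8
# and grid column idx % 8.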
def imsave(images, size, path):
image = np.squeeze(merge(images, size))
return scipy.misc.imsave(path, image)
def center_crop(x, crop_h, crop_w, resize_h=64, resize_w=64):
if crop_w is None:
crop_w = crop_h
h, w = x.shape[:2]
j = int(round((h - crop_h)/2.))
i = int(round((w - crop_w)/2.))
return scipy.misc.imresize(x[j:j+crop_h, i:i+crop_w], [resize_h, resize_w])
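# Crop arithmetic sketch (hypothetical input): for a 108x108 image with
# crop_h = crop_w = 64, j = i = round((108 - 64) / 2) = 22, so the slice
# x[22:86, 22:86] is taken before resizing to (resize_h, resize_w).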
def transform(image, input_height, input_width, resize_height=64, resize_width=64, crop=True):
if crop:
cropped_image = center_crop(image, input_height, input_width, resize_height, resize_width)
else:
cropped_image = scipy.misc.imresize(image, [resize_height, resize_width])
return np.array(cropped_image)/127.5 - 1.
def inverse_transform(images):
return (images+1.)/2.
def save_matplot_img(images, size, image_path):
    # rescale image data // M*N*3 // RGB float32 : values must lie between 0. and 1.
for idx in range(64):
vMin = np.amin(images[idx])
vMax = np.amax(images[idx])
img_arr = images[idx].reshape(32*32*3,1) # flatten
for i, v in enumerate(img_arr):
img_arr[i] = (v-vMin)/(vMax-vMin)
img_arr = img_arr.reshape(32,32,3) # M*N*3
# matplot display
plt.subplot(8,8,idx+1),plt.imshow(img_arr, interpolation='nearest')
#plt.title("pred.:{}".format(np.argmax(self.data_y[0]),fontsize=10))
plt.axis("off")
plt.savefig(image_path)
#plt.show()
| apache-2.0 |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/tqdm/_tqdm.py | 3 | 36089 | """
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
from __future__ import absolute_import
# future division is important to divide integers and get as
# a result precise floating numbers (instead of truncated int)
from __future__ import division
# import compatibility functions and utilities
from ._utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up, _unicode, WeakSet
import sys
from time import time
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange',
'TqdmTypeError', 'TqdmKeyError', 'TqdmDeprecationWarning']
class TqdmTypeError(TypeError):
pass
class TqdmKeyError(KeyError):
pass
class TqdmDeprecationWarning(Exception):
# not suppressed if raised
def __init__(self, msg, fp_write=None, *a, **k):
if fp_write is not None:
fp_write("\nTqdmDeprecationWarning: " + str(msg).rstrip() + '\n')
else:
super(TqdmDeprecationWarning, self).__init__(msg, *a, **k)
class tqdm(object):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
@staticmethod
def format_sizeof(num, suffix=''):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.95:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= 1000.0
return '{0:3.1f}Y'.format(num) + suffix
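    # Hand-derived illustrations of the thresholds above (not taken from the
    # original docstring):
    #   format_sizeof(999)       -> '999'
    #   format_sizeof(1500)      -> '1.50K'
    #   format_sizeof(123456789) -> '123M'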
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
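    # Hand-derived illustrations:
    #   format_interval(75)   -> '01:15'
    #   format_interval(3661) -> '1:01:01'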
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover
def fp_write(s):
fp.write(_unicode(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = len(s)
fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
@staticmethod
def format_meter(n, total, elapsed, ncols=None, prefix='',
ascii=False, unit='it', unit_scale=False, rate=None,
bar_format=None):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int
Number of finished iterations.
total : int
            The expected total number of iterations. If meaningless (e.g. None), only
basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progress meter to stay within this bound
[default: None]. The fallback meter width is 10 for the progress
bar + no limit for the iterations counter and statistics. If 0,
will not print any meter (only stats).
prefix : str, optional
Prefix message (included in total width) [default: ''].
ascii : bool, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
(1-9 #).
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool, optional
            If set, the number of iterations will be printed with an
appropriate SI metric prefix (K = 10^3, M = 10^6, etc.)
[default: False].
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where l_bar is
'{desc}{percentage:3.0f}%|' and r_bar is
'| {n_fmt}/{total_fmt} [{elapsed_str}<{remaining_str}, {rate_fmt}]'
Possible vars: bar, n, n_fmt, total, total_fmt, percentage,
rate, rate_fmt, elapsed, remaining, l_bar, r_bar, desc.
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n > total:
total = None
format_interval = tqdm.format_interval
elapsed_str = format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if (rate and (rate < 1)) else None
format_sizeof = tqdm.format_sizeof
rate_fmt = ((format_sizeof(inv_rate if inv_rate else rate)
if unit_scale else
'{0:5.2f}'.format(inv_rate if inv_rate else rate))
if rate else '?') \
+ ('s' if inv_rate else unit) + '/' + (unit if inv_rate else 's')
if unit_scale:
n_fmt = format_sizeof(n)
total_fmt = format_sizeof(total) if total else None
else:
n_fmt = str(n)
total_fmt = str(total)
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
remaining_str = format_interval((total - n) / rate) \
if rate else '?'
# format the stats displayed to the left and right sides of the bar
l_bar = (prefix if prefix else '') + \
'{0:3.0f}%|'.format(percentage)
r_bar = '| {0}/{1} [{2}<{3}, {4}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
if bar_format:
# Custom bar formatting
# Populate a dict with all available progress indicators
bar_args = {'n': n,
'n_fmt': n_fmt,
'total': total,
'total_fmt': total_fmt,
'percentage': percentage,
'rate': rate if inv_rate is None else inv_rate,
'rate_noinv': rate,
'rate_noinv_fmt': ((format_sizeof(rate)
if unit_scale else
'{0:5.2f}'.format(rate))
if rate else '?') + unit + '/s',
'rate_fmt': rate_fmt,
'elapsed': elapsed_str,
'remaining': remaining_str,
'l_bar': l_bar,
'r_bar': r_bar,
'desc': prefix if prefix else '',
# 'bar': full_bar # replaced by procedure below
}
# Interpolate supplied bar format with the dict
if '{bar}' in bar_format:
# Format left/right sides of the bar, and format the bar
# later in the remaining space (avoid breaking display)
l_bar_user, r_bar_user = bar_format.split('{bar}')
l_bar = l_bar_user.format(**bar_args)
r_bar = r_bar_user.format(**bar_args)
else:
# Else no progress bar, we can just format and return
return bar_format.format(**bar_args)
# Formatting progress bar
# space available for bar's display
N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols \
else 10
# format bar depending on availability of unicode/ascii chars
if ascii:
bar_length, frac_bar_length = divmod(
int(frac * N_BARS * 10), 10)
bar = '#' * bar_length
frac_bar = chr(48 + frac_bar_length) if frac_bar_length \
else ' '
else:
bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8)
bar = _unich(0x2588) * bar_length
frac_bar = _unich(0x2590 - frac_bar_length) \
if frac_bar_length else ' '
# whitespace padding
if bar_length < N_BARS:
full_bar = bar + frac_bar + \
' ' * max(N_BARS - bar_length - 1, 0)
else:
full_bar = bar + \
' ' * max(N_BARS - bar_length, 0)
# Piece together the bar parts
return l_bar + full_bar + r_bar
# no total: no progressbar, ETA, just progress stats
else:
return (prefix if prefix else '') + '{0}{1} [{2}, {3}]'.format(
n_fmt, unit, elapsed_str, rate_fmt)
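    # Rough hand-derived example (a sketch, not captured output; exact bar
    # characters and padding depend on terminal/unicode support):
    #   format_meter(20, 100, 12, ascii=True)
    #   ->  ' 20%|##        | 20/100 [00:12<00:48,  1.67it/s]'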
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Add to the list of instances
if "_instances" not in cls.__dict__:
cls._instances = WeakSet()
cls._instances.add(instance)
# Return the instance
return instance
@classmethod
def _get_free_pos(cls, instance=None):
""" Skips specified instance """
try:
return max(inst.pos for inst in cls._instances
if inst is not instance) + 1
except ValueError as e:
if "arg is an empty sequence" in str(e):
return 0
raise # pragma: no cover
@classmethod
def _decr_instances(cls, instance):
"""
Remove from list and reposition other bars
so that newer bars won't overlap previous bars
"""
try: # in case instance was explicitly positioned, it won't be in set
cls._instances.remove(instance)
for inst in cls._instances:
if inst.pos > instance.pos:
inst.pos -= 1
except KeyError:
pass
@classmethod
def write(cls, s, file=sys.stdout, end="\n"):
"""
Print a message via tqdm (without overlap with bars)
"""
fp = file
# Clear all bars
inst_cleared = []
for inst in getattr(cls, '_instances', []):
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if inst.fp == fp or all(f in (sys.stdout, sys.stderr)
for f in (fp, inst.fp)):
inst.clear()
inst_cleared.append(inst)
# Write the message
fp.write(s)
fp.write(end)
# Force refresh display of bars we cleared
for inst in inst_cleared:
inst.refresh()
# TODO: make list of all instances incl. absolutely positioned ones?
@classmethod
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.DataFrameGroupBy
| groupby.SeriesGroupBy
).progress_apply
        A new instance will be created every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm, tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.groupby import SeriesGroupBy
from pandas.core.groupby import GroupBy
from pandas.core.groupby import PanelGroupBy
from pandas import Panel
deprecated_t = [tkwargs.pop('deprecated_t', None)]
def inner_generator(df_function='apply'):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
*args, *kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = getattr(df, 'ngroups', None)
if total is None: # not grouped
total = len(df) if isinstance(df, Series) \
else df.size // len(df)
else:
total += 1 # pandas calls update once too many
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
# Define bar updating wrapper
def wrapper(*args, **kwargs):
t.update()
return func(*args, **kwargs)
# Apply the provided function (in *args and **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, *args, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator('map')
SeriesGroupBy.progress_map = inner_generator('map')
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator('applymap')
Panel.progress_apply = inner_generator()
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator('aggregate')
GroupBy.progress_transform = inner_generator('transform')
def __init__(self, iterable=None, desc=None, total=None, leave=True,
file=sys.stderr, ncols=None, mininterval=0.1,
maxinterval=10.0, miniters=None, ascii=None, disable=False,
unit='it', unit_scale=False, dynamic_ncols=False,
smoothing=0.3, bar_format=None, initial=0, position=None,
gui=False, **kwargs):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. As a last resort, only basic
progress statistics are displayed (no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrary large positive integer,
e.g. int(9e9).
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
[default: sys.stderr]. Uses `file.write(str)` and `file.flush()`
methods.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress update interval, in seconds [default: 0.1].
maxinterval : float, optional
Maximum progress update interval, in seconds [default: 10.0].
miniters : int, optional
Minimum progress update interval, in iterations.
If specified, will set `mininterval` to 0.
ascii : bool, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters `1-9 #`.
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False].
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool, optional
If set, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False].
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
If unspecified, will use '{l_bar}{bar}{r_bar}', where l_bar is
'{desc}{percentage:3.0f}%|' and r_bar is
'| {n_fmt}/{total_fmt} [{elapsed_str}<{remaining_str}, {rate_fmt}]'
Possible vars: bar, n, n_fmt, total, total_fmt, percentage,
rate, rate_fmt, elapsed, remaining, l_bar, r_bar, desc.
initial : int, optional
The initial counter value. Useful when restarting a progress
bar [default: 0].
position : int, optional
Specify the line offset to print this bar (starting from 0)
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm_gui(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if disable:
self.iterable = iterable
self.disable = disable
self.pos = self._get_free_pos(self)
self._instances.remove(self)
return
if kwargs:
self.disable = True
self.pos = self._get_free_pos(self)
self._instances.remove(self)
raise (TqdmDeprecationWarning("""\
`nested` is deprecated and automated. Use position instead for manual control.
""", fp_write=getattr(file, 'write', sys.stderr.write))
if "nested" in kwargs else
TqdmKeyError("Unknown argument(s): " + str(kwargs)))
# Preprocess the arguments
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
ncols = dynamic_ncols(file)
else:
ncols = _environ_cols_wrapper()(file)
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ascii:
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc + ': ' if desc else ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
self.pos = self._get_free_pos(self) if position is None else position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
if self.pos:
self.moveto(self.pos)
self.sp(self.format_meter(self.n, total, 0,
(dynamic_ncols(file) if dynamic_ncols else ncols),
self.desc, ascii, unit, unit_scale, None, bar_format))
if self.pos:
self.moveto(-self.pos)
# Init the time counter
self.start_t = self.last_print_t = self._time()
def __len__(self):
return (self.iterable.shape[0] if hasattr(self.iterable, 'shape')
else len(self.iterable)) if self.iterable is not None \
else self.total
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
return False
def __del__(self):
self.close()
def __repr__(self):
return self.format_meter(self.n, self.total,
time() - self.last_print_t,
self.ncols, self.desc, self.ascii, self.unit,
self.unit_scale, 1 / self.avg_time
if self.avg_time else None, self.bar_format)
def __lt__(self, other):
return self.pos < other.pos
def __le__(self, other):
return (self < other) or (self == other)
def __eq__(self, other):
return self.pos == other.pos
def __ne__(self, other):
return not (self == other)
def __gt__(self, other):
return not (self <= other)
def __ge__(self, other):
return not (self < other)
def __hash__(self):
return id(self)
def __iter__(self):
''' Backward-compatibility to use: for x in tqdm(iterable) '''
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
else:
ncols = self.ncols
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
unit = self.unit
unit_scale = self.unit_scale
ascii = self.ascii
start_t = self.start_t
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
dynamic_ncols = self.dynamic_ncols
smoothing = self.smoothing
avg_time = self.avg_time
bar_format = self.bar_format
_time = self._time
format_meter = self.format_meter
try:
sp = self.sp
except AttributeError:
raise TqdmDeprecationWarning("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
""", fp_write=getattr(self.fp, 'write', sys.stderr.write))
for obj in iterable:
yield obj
# Update and print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check the counter first (avoid calls to time())
if n - last_print_n >= miniters:
delta_t = _time() - last_print_t
if delta_t >= mininterval:
cur_t = _time()
delta_it = n - last_print_n
elapsed = cur_t - start_t
# EMA (not just overall average)
if smoothing and delta_t:
avg_time = delta_t / delta_it \
if avg_time is None \
else smoothing * delta_t / delta_it + \
(1 - smoothing) * avg_time
if self.pos:
self.moveto(self.pos)
# Printing the bar's update
sp(format_meter(
n, self.total, elapsed,
(dynamic_ncols(self.fp) if dynamic_ncols
else ncols),
self.desc, ascii, unit, unit_scale,
1 / avg_time if avg_time else None, bar_format))
if self.pos:
self.moveto(-self.pos)
# If no `miniters` was specified, adjust automatically
# to the maximum iteration rate seen so far.
if dynamic_miniters:
if maxinterval and delta_t > maxinterval:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif mininterval and delta_t:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
miniters = smoothing * delta_it * mininterval \
/ delta_t + (1 - smoothing) * miniters
else:
miniters = smoothing * delta_it + \
(1 - smoothing) * miniters
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.close()
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int
Increment to add to the internal counter of iterations
[default: 1].
"""
if self.disable:
return
if n < 0:
raise ValueError("n ({0}) cannot be negative".format(n))
self.n += n
if self.n - self.last_print_n >= self.miniters:
# We check the counter first, to reduce the overhead of time()
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # should be n?
elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t:
self.avg_time = delta_t / delta_it \
if self.avg_time is None \
else self.smoothing * delta_t / delta_it + \
(1 - self.smoothing) * self.avg_time
if not hasattr(self, "sp"):
raise TqdmDeprecationWarning("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
""", fp_write=getattr(self.fp, 'write', sys.stderr.write))
if self.pos:
self.moveto(self.pos)
# Print bar's update
self.sp(self.format_meter(
self.n, self.total, elapsed,
(self.dynamic_ncols(self.fp) if self.dynamic_ncols
else self.ncols),
self.desc, self.ascii, self.unit, self.unit_scale,
1 / self.avg_time if self.avg_time else None,
self.bar_format))
if self.pos:
self.moveto(-self.pos)
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t > self.maxinterval:
self.miniters = self.miniters * self.maxinterval \
/ delta_t
elif self.mininterval and delta_t:
self.miniters = self.smoothing * delta_it \
* self.mininterval / delta_t + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = self.smoothing * delta_it + \
(1 - self.smoothing) * self.miniters
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
"""
Cleanup and (if leave=False) close the progressbar.
"""
if self.disable:
return
# Prevent multiple closures
self.disable = True
# decrement instance pos and remove from internal set
pos = self.pos
self._decr_instances(self)
# GUI mode
if not hasattr(self, "sp"):
return
# annoyingly, _supports_unicode isn't good enough
def fp_write(s):
self.fp.write(_unicode(s))
try:
fp_write('')
except ValueError as e:
if 'closed' in str(e):
return
raise # pragma: no cover
if pos:
self.moveto(pos)
if self.leave:
if self.last_print_n < self.n:
cur_t = self._time()
# stats for overall rate (no weighted average)
self.sp(self.format_meter(
self.n, self.total, cur_t - self.start_t,
(self.dynamic_ncols(self.fp) if self.dynamic_ncols
else self.ncols),
self.desc, self.ascii, self.unit, self.unit_scale, None,
self.bar_format))
if pos:
self.moveto(-pos)
else:
fp_write('\n')
else:
self.sp('') # clear up last bar
if pos:
self.moveto(-pos)
else:
fp_write('\r')
def unpause(self):
"""
Restart tqdm timer from last print time.
"""
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
def set_description(self, desc=None):
"""
Set/modify description of the progress bar.
"""
self.desc = desc + ': ' if desc else ''
def moveto(self, n):
self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
def clear(self, nomove=False):
"""
Clear current bar display
"""
if not nomove:
self.moveto(self.pos)
# clear up the bar (can't rely on sp(''))
self.fp.write('\r')
self.fp.write(' ' * (self.ncols if self.ncols else 10))
self.fp.write('\r') # place cursor back at the beginning of line
if not nomove:
self.moveto(-self.pos)
def refresh(self):
"""
Force refresh the display of this bar
"""
self.moveto(self.pos)
# clear up this line's content (whatever there was)
self.clear(nomove=True)
# Print current/last bar state
self.fp.write(self.__repr__())
self.moveto(-self.pos)
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
| mit |
jseabold/scikit-learn | sklearn/decomposition/tests/test_online_lda.py | 22 | 13165 | import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.exceptions import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
    # Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
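# Illustration (not part of the original test): with n_topics = 3 the dense
# form of X is the 9x9 block-diagonal matrix
#   [[3 3 3 0 0 0 0 0 0],
#    [3 3 3 0 0 0 0 0 0],
#    [3 3 3 0 0 0 0 0 0],
#    [0 0 0 3 3 3 0 0 0],
#    [0 0 0 3 3 3 0 0 0],
#    [0 0 0 3 3 3 0 0 0],
#    [0 0 0 0 0 0 3 3 3],
#    [0 0 0 0 0 0 3 3 3],
#    [0 0 0 0 0 0 3 3 3]]
# so words {0, 1, 2}, {3, 4, 5} and {6, 7, 8} each belong to exactly one topic.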
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
    # Test LDA batch learning (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
    # test passing a dense matrix with negative input values
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
n_topics, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_method=method,
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_perplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X,
invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X,
invalid_n_topics)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
| bsd-3-clause |
schoolie/bokeh | bokeh/charts/attributes.py | 3 | 14573 | '''
'''
from __future__ import absolute_import
from copy import copy
from itertools import cycle
import pandas as pd
from bokeh.core.enums import DashPattern
from bokeh.core.has_props import HasProps
from bokeh.core.properties import Any, Bool, Dict, Either, Instance, List, Override, String
from bokeh.models.sources import ColumnDataSource
from . import DEFAULT_PALETTE
from .data_source import ChartDataSource
from .properties import ColumnLabel
from .utils import marker_types
from .stats import Bins
class AttrSpec(HasProps):
"""A container for assigning attributes to values and retrieving them as needed.
A special function this provides is automatically handling cases where the provided
iterator is too short compared to the distinct values provided.
Once created as attr_spec, you can do attr_spec[data_label], where data_label must
be a one dimensional tuple of values, representing the unique group in the data.
See the :meth:`AttrSpec.setup` method for the primary way to provide an existing
AttrSpec with data and column values and update all derived property values.
"""
data = Instance(ColumnDataSource)
iterable = List(Any, default=None)
attrname = String(help='Name of the attribute the spec provides.')
columns = Either(ColumnLabel, List(ColumnLabel), help="""
The label or list of column labels that correspond to the columns that will be
used to find all distinct values (single column) or combination of values (
multiple columns) to then assign a unique attribute to. If not enough unique
attribute values are found, then the attribute values will be cycled.
""")
default = Any(default=None, help="""
The default value for the attribute, which is used if no column is assigned to
the attribute for plotting. If the default value is not provided, the first
value in the `iterable` property is used.
""")
attr_map = Dict(Any, Any, help="""
Created by the attribute specification when `iterable` and `data` are
available. The `attr_map` will include a mapping between the distinct value(s)
found in `columns` and the attribute value that has been assigned.
""")
items = Any(default=None, help="""
The attribute specification calculates this list of distinct values that are
found in `columns` of `data`.
""")
sort = Bool(default=True, help="""
A boolean flag to tell the attribute specification to sort `items`, when it is
calculated. This affects which value of `iterable` is assigned to each distinct
value in `items`.
""")
ascending = Bool(default=True, help="""
A boolean flag to tell the attribute specification how to sort `items` if the
`sort` property is set to `True`. The default setting for `ascending` is `True`.
""")
bins = Instance(Bins, help="""
If an attribute spec is binning data, so that we can map one value in the
`iterable` to one value in `items`, then this attribute will contain an instance
of the Bins stat. This is used to create unique labels for each bin, which is
then used for `items` instead of the actual unique values in `columns`.
""")
def __init__(self, columns=None, df=None, iterable=None, default=None,
items=None, **properties):
"""Create a lazy evaluated attribute specification.
Args:
columns: a list of column labels
df(:class:`~pandas.DataFrame`): the data source for the attribute spec.
iterable: an iterable of distinct attribute values
default: a value to use as the default attribute when no columns are passed
items: the distinct values in columns. If items is provided as input,
then the values provided are used instead of being calculated. This can
be used to force a specific order for assignment.
**properties: other properties to pass to parent :class:`HasProps`
"""
properties['columns'] = self._ensure_list(columns)
if df is not None:
properties['data'] = ColumnDataSource(df)
if default is None and iterable is not None:
default_iter = copy(iterable)
properties['default'] = next(iter(default_iter))
elif default is not None:
properties['default'] = default
if iterable is not None:
properties['iterable'] = iterable
if items is not None:
properties['items'] = items
super(AttrSpec, self).__init__(**properties)
if self.default is None and self.iterable is not None:
self.default = next(iter(copy(self.iterable)))
if self.data is not None and self.columns is not None:
if df is None:
df = self.data.to_df()
self._generate_items(df, columns=self.columns)
if self.items is not None and self.iterable is not None:
self.attr_map = self._create_attr_map()
@staticmethod
def _ensure_list(attr):
"""Always returns a list with the provided value. Returns the value if a list."""
if isinstance(attr, str):
return [attr]
elif isinstance(attr, tuple):
return list(attr)
else:
return attr
@staticmethod
def _ensure_tuple(attr):
"""Return tuple with the provided value. Returns the value if a tuple."""
if not isinstance(attr, tuple):
return (attr,)
else:
return attr
def _setup_default(self):
"""Stores the first value of iterable into `default` property."""
self.default = next(self._setup_iterable())
def _setup_iterable(self):
"""Default behavior is to copy and cycle the provided iterable."""
return cycle(copy(self.iterable))
def _generate_items(self, df, columns):
"""Produce list of unique tuples that identify each item."""
if self.sort:
# TODO (fpliger): this handles pandas API change so users do not experience
# the related annoying deprecation warning. This is probably worth
# removing when pandas deprecated version (0.16) is "old" enough
try:
df = df.sort_values(by=columns, ascending=self.ascending)
except AttributeError:
df = df.sort(columns=columns, ascending=self.ascending)
items = df[columns].drop_duplicates()
self.items = [tuple(x) for x in items.to_records(index=False)]
def _create_attr_map(self, df=None, columns=None):
"""Creates map between unique values and available attributes."""
if df is not None and columns is not None:
self._generate_items(df, columns)
iterable = self._setup_iterable()
return {item: next(iterable) for item in self._item_tuples()}
def _item_tuples(self):
return [self._ensure_tuple(item) for item in self.items]
def set_columns(self, columns):
"""Set columns property and update derived properties as needed."""
columns = self._ensure_list(columns)
if all([col in self.data.column_names for col in columns]):
self.columns = columns
else:
# we have input values other than columns
# assume this is now the iterable at this point
self.iterable = columns
self._setup_default()
def setup(self, data=None, columns=None):
"""Set the data and update derived properties as needed."""
if data is not None:
self.data = data
if columns is not None and self.data is not None:
self.set_columns(columns)
if self.columns is not None and self.data is not None:
self.attr_map = self._create_attr_map(self.data.to_df(), self.columns)
def update_data(self, data):
self.setup(data=data, columns=self.columns)
def __getitem__(self, item):
"""Lookup the attribute to use for the given unique group label."""
if not self.attr_map:
return self.default
elif self._ensure_tuple(item) not in self.attr_map.keys():
# make sure we have attr map
self.setup()
return self.attr_map[self._ensure_tuple(item)]
@property
def series(self):
if not self.attr_map:
return pd.Series()
else:
index = pd.MultiIndex.from_tuples(self._item_tuples(), names=self.columns)
return pd.Series(list(self.attr_map.values()), index=index)
class ColorAttr(AttrSpec):
"""An attribute specification for mapping unique data values to colors.
.. note::
Should be expanded to support more complex coloring options.
"""
attrname = Override(default='color')
iterable = Override(default=DEFAULT_PALETTE)
bin = Bool(default=False)
def __init__(self, **kwargs):
iterable = kwargs.pop('palette', None)
if iterable is not None:
kwargs['iterable'] = iterable
super(ColorAttr, self).__init__(**kwargs)
def _generate_items(self, df, columns):
"""Produce list of unique tuples that identify each item."""
if not self.bin:
super(ColorAttr, self)._generate_items(df, columns)
else:
if len(columns) == 1 and ChartDataSource.is_number(df[columns[0]]):
self.bins = Bins(source=ColumnDataSource(df), column=columns[0],
bins=len(self.iterable), aggregate=False)
if self.sort:
self.bins.sort(ascending=self.ascending)
self.items = [bin.label[0] for bin in self.bins]
else:
raise ValueError('Binned colors can only be created for one column of \
numerical data.')
def add_bin_labels(self, data):
col = self.columns[0]
# save original values into new column
data._data[col + '_values'] = data._data[col]
for bin in self.bins:
# set all rows associated to each bin to the bin label being mapped to colors
data._data.ix[data._data[col + '_values'].isin(bin.values),
col] = bin.label[0]
data._data[col] = pd.Categorical(data._data[col], categories=list(self.items),
ordered=self.sort)
class MarkerAttr(AttrSpec):
"""An attribute specification for mapping unique data values to markers."""
attrname = Override(default='marker')
iterable = Override(default=list(marker_types.keys()))
def __init__(self, **kwargs):
iterable = kwargs.pop('markers', None)
if iterable is not None:
kwargs['iterable'] = iterable
super(MarkerAttr, self).__init__(**kwargs)
dashes = DashPattern._values
class DashAttr(AttrSpec):
"""An attribute specification for mapping unique data values to line dashes."""
attrname = Override(default='dash')
iterable = Override(default=dashes)
def __init__(self, **kwargs):
iterable = kwargs.pop('dash', None)
if iterable is not None:
kwargs['iterable'] = iterable
super(DashAttr, self).__init__(**kwargs)
class IdAttr(AttrSpec):
"""An attribute specification for mapping unique data values to line dashes."""
attrname = Override(default='id')
def _setup_iterable(self):
return iter(range(0, len(self.items)))
class CatAttr(AttrSpec):
"""An attribute specification for mapping unique data values to labels.
.. note::
this is a special attribute specification, which is used for defining which
labels are used for one aspect of a chart (grouping) vs another (stacking or
legend)
"""
attrname = Override(default='nest')
def __init__(self, **kwargs):
super(CatAttr, self).__init__(**kwargs)
def _setup_iterable(self):
return iter(self.items)
def get_levels(self, columns):
"""Provides a list of levels the attribute represents."""
if self.columns is not None:
levels = [columns.index(col) for col in self.columns]
return levels
else:
return []
""" Attribute Spec Functions
Convenient functions for producing attribute specifications. These would be
the interface used by end users when providing attribute specs as inputs
to the Chart.
"""
def color(columns=None, palette=None, bin=False, **kwargs):
"""Produces a ColorAttr specification for coloring groups of data based on columns.
Args:
        columns (str or list(str), optional): a column or list of columns for coloring
        palette (list(str), optional): a list of colors to use for assigning to unique
            values in `columns`.
        bin (bool, optional): whether to bin a single numerical column into
            ``len(palette)`` groups and assign colors by bin (default=False)
        **kwargs: any keyword arg supported by :class:`AttrSpec`
Returns:
a `ColorAttr` object
"""
if palette is not None:
kwargs['palette'] = palette
kwargs['columns'] = columns
kwargs['bin'] = bin
return ColorAttr(**kwargs)
def marker(columns=None, markers=None, **kwargs):
""" Specifies detailed configuration for a marker attribute.
Args:
        columns (list or str): a column or list of columns used to assign markers
        markers (list(str) or str): a custom list of markers. Must exist within
            :data:`marker_types`.
        **kwargs: any keyword arg supported by :class:`AttrSpec`
Returns:
a `MarkerAttr` object
"""
if markers is not None:
kwargs['markers'] = markers
kwargs['columns'] = columns
return MarkerAttr(**kwargs)
def cat(columns=None, cats=None, sort=True, ascending=True, **kwargs):
""" Specifies detailed configuration for a chart attribute that uses categoricals.
Args:
columns (list or str): the columns used to generate the categorical variable
cats (list, optional): overrides the values derived from columns
sort (bool, optional): whether to sort the categorical values (default=True)
        ascending (bool, optional): whether to sort the categorical values in
            ascending order (default=True)
        **kwargs: any keyword arg supported by :class:`AttrSpec`
Returns:
a `CatAttr` object
"""
if cats is not None:
kwargs['cats'] = cats
kwargs['columns'] = columns
kwargs['sort'] = sort
kwargs['ascending'] = ascending
return CatAttr(**kwargs)
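# ---------------------------------------------------------------------------
# Illustrative usage sketch. The `Bar` chart import and the example column
# names below are assumptions made purely for illustration:
#
#   import pandas as pd
#   from bokeh.charts import Bar
#
#   df = pd.DataFrame({'fruit': ['apple', 'pear', 'apple', 'pear'],
#                      'region': ['east', 'east', 'west', 'west'],
#                      'sales': [10, 15, 7, 9]})
#
#   chart = Bar(df, label='fruit', values='sales',
#               color=color(columns='region', palette=['#1f77b4', '#ff7f0e']),
#               group=cat(columns='region', sort=True, ascending=True))
# ---------------------------------------------------------------------------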
| bsd-3-clause |
slipguru/ignet | icing/externals/neighbors.py | 2 | 6293 | """Portion of file taken from sklearn.neighbors.base."""
import numpy as np
from scipy.sparse import csr_matrix, issparse
import multiprocessing as mp
import itertools
from sklearn.metrics import pairwise_distances
from sklearn.utils import check_array
def _func(X, Y):
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='int')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = int(len(set(X[i].getVGene('set')) &
set(Y[j].getVGene('set'))) > 0 or
len(set(X[i].getJGene('set')) &
set(Y[j].getJGene('set'))) > 0)
return out
def radius_neighbors(X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
n_samples = X.shape[0]
from joblib import delayed, Parallel
from sklearn.utils import gen_even_slices
n_jobs = max(mp.cpu_count(), n_samples)
print(gen_even_slices(X.shape[0], n_jobs))
fd = delayed(_func)
dist = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, X[s])
for s in gen_even_slices(X.shape[0], n_jobs))
neigh_ind_list = [np.where(d > 0)[0] for d in np.hstack(dist)]
# print(neigh_ind_list)
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
return neigh_ind
def radius_neighbors_graph(X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
    Neighborhoods are restricted to points at a distance lower than
    radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = radius_neighbors(X, radius, return_distance=False)
A_data = None
else:
raise ValueError(
'Unsupported mode, must be "connectivity" '
'but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
sparse = csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples1))
import scipy
return sparse + sparse.T + scipy.sparse.eye(sparse.shape[0])
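if __name__ == '__main__':
    # Minimal usage sketch. `_DummyRecord` is a hypothetical stand-in for the
    # immunoglobulin records this module expects: `_func` only relies on the
    # getVGene('set') / getJGene('set') accessors used above.
    class _DummyRecord(object):
        def __init__(self, v_genes, j_genes):
            self._v, self._j = v_genes, j_genes

        def getVGene(self, _):
            return self._v

        def getJGene(self, _):
            return self._j

    records = np.array([_DummyRecord(['IGHV1-2'], ['IGHJ4']),
                        _DummyRecord(['IGHV1-2'], ['IGHJ6']),
                        _DummyRecord(['IGHV3-23'], ['IGHJ5'])], dtype=object)
    # Two records count as "neighbors" when they share at least one V or J
    # gene, so the first two records are connected and the third is isolated.
    print(_func(records, records))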
| bsd-2-clause |
numenta-ci/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/texmanager.py | 69 | 16818 | """
This module supports embedded TeX expressions in matplotlib via dvipng
and dvips for the raster and postscript backends. The tex and
dvipng/dvips information is cached in ~/.matplotlib/tex.cache for reuse between
sessions
Requirements:
* latex
* \*Agg backends: dvipng
* PS backend: latex w/ psfrag, dvips, and Ghostscript 8.51
(older versions do not work properly)
Backends:
* \*Agg
* PS
* PDF
For raster output, you can get RGBA numpy arrays from TeX expressions
as follows::
texmanager = TexManager()
s = '\\TeX\\ is Number $\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\pi}}{2^n}$!'
Z = self.texmanager.get_rgba(s, size=12, dpi=80, rgb=(1,0,0))
To enable tex rendering of all text in your matplotlib figure, set
text.usetex in your matplotlibrc file (http://matplotlib.sf.net/matplotlibrc)
or include these two lines in your script::
from matplotlib import rc
rc('text', usetex=True)
"""
import copy, glob, os, shutil, sys, warnings
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
import distutils.version
import numpy as np
import matplotlib as mpl
from matplotlib import rcParams
from matplotlib._png import read_png
DEBUG = False
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
def dvipng_hack_alpha():
stdin, stdout = os.popen4('dvipng -version')
for line in stdout:
if line.startswith('dvipng '):
version = line.split()[-1]
mpl.verbose.report('Found dvipng version %s'% version,
'helpful')
version = distutils.version.LooseVersion(version)
return version < distutils.version.LooseVersion('1.6')
raise RuntimeError('Could not obtain dvipng version')
class TexManager:
"""
Convert strings to dvi files using TeX, caching the results to a
working dir
"""
oldpath = mpl.get_home()
if oldpath is None: oldpath = mpl.get_data_path()
oldcache = os.path.join(oldpath, '.tex.cache')
configdir = mpl.get_configdir()
texcache = os.path.join(configdir, 'tex.cache')
if os.path.exists(oldcache):
print >> sys.stderr, """\
WARNING: found a TeX cache dir in the deprecated location "%s".
Moving it to the new default location "%s"."""%(oldcache, texcache)
shutil.move(oldcache, texcache)
if not os.path.exists(texcache):
os.mkdir(texcache)
_dvipng_hack_alpha = dvipng_hack_alpha()
# mappable cache of
rgba_arrayd = {}
grey_arrayd = {}
postscriptd = {}
pscnt = 0
serif = ('cmr', '')
sans_serif = ('cmss', '')
monospace = ('cmtt', '')
cursive = ('pzc', r'\usepackage{chancery}')
font_family = 'serif'
font_families = ('serif', 'sans-serif', 'cursive', 'monospace')
font_info = {'new century schoolbook': ('pnc',
r'\renewcommand{\rmdefault}{pnc}'),
'bookman': ('pbk', r'\renewcommand{\rmdefault}{pbk}'),
'times': ('ptm', r'\usepackage{mathptmx}'),
'palatino': ('ppl', r'\usepackage{mathpazo}'),
'zapf chancery': ('pzc', r'\usepackage{chancery}'),
'cursive': ('pzc', r'\usepackage{chancery}'),
'charter': ('pch', r'\usepackage{charter}'),
'serif': ('cmr', ''),
'sans-serif': ('cmss', ''),
'helvetica': ('phv', r'\usepackage{helvet}'),
'avant garde': ('pag', r'\usepackage{avant}'),
'courier': ('pcr', r'\usepackage{courier}'),
'monospace': ('cmtt', ''),
'computer modern roman': ('cmr', ''),
'computer modern sans serif': ('cmss', ''),
'computer modern typewriter': ('cmtt', '')}
_rc_cache = None
_rc_cache_keys = ('text.latex.preamble', )\
+ tuple(['font.'+n for n in ('family', ) + font_families])
def __init__(self):
if not os.path.isdir(self.texcache):
os.mkdir(self.texcache)
ff = rcParams['font.family'].lower()
if ff in self.font_families:
self.font_family = ff
else:
mpl.verbose.report('The %s font family is not compatible with LaTeX. serif will be used by default.' % ff, 'helpful')
self.font_family = 'serif'
fontconfig = [self.font_family]
for font_family, font_family_attr in \
[(ff, ff.replace('-', '_')) for ff in self.font_families]:
for font in rcParams['font.'+font_family]:
if font.lower() in self.font_info:
found_font = self.font_info[font.lower()]
setattr(self, font_family_attr,
self.font_info[font.lower()])
if DEBUG:
print 'family: %s, font: %s, info: %s'%(font_family,
font, self.font_info[font.lower()])
break
else:
                    if DEBUG: print '%s font is not compatible with usetex' % font
else:
mpl.verbose.report('No LaTeX-compatible font found for the %s font family in rcParams. Using default.' % ff, 'helpful')
setattr(self, font_family_attr, self.font_info[font_family])
fontconfig.append(getattr(self, font_family_attr)[0])
self._fontconfig = ''.join(fontconfig)
# The following packages and commands need to be included in the latex
# file's preamble:
cmd = [self.serif[1], self.sans_serif[1], self.monospace[1]]
if self.font_family == 'cursive': cmd.append(self.cursive[1])
while r'\usepackage{type1cm}' in cmd:
cmd.remove(r'\usepackage{type1cm}')
cmd = '\n'.join(cmd)
self._font_preamble = '\n'.join([r'\usepackage{type1cm}', cmd,
r'\usepackage{textcomp}'])
def get_basefile(self, tex, fontsize, dpi=None):
"""
returns a filename based on a hash of the string, fontsize, and dpi
"""
s = ''.join([tex, self.get_font_config(), '%f'%fontsize,
self.get_custom_preamble(), str(dpi or '')])
# make sure hash is consistent for all strings, regardless of encoding:
bytes = unicode(s).encode('utf-8')
return os.path.join(self.texcache, md5(bytes).hexdigest())
def get_font_config(self):
"""Reinitializes self if relevant rcParams on have changed."""
if self._rc_cache is None:
self._rc_cache = dict([(k,None) for k in self._rc_cache_keys])
changed = [par for par in self._rc_cache_keys if rcParams[par] != \
self._rc_cache[par]]
if changed:
if DEBUG: print 'DEBUG following keys changed:', changed
for k in changed:
if DEBUG:
print 'DEBUG %-20s: %-10s -> %-10s' % \
(k, self._rc_cache[k], rcParams[k])
# deepcopy may not be necessary, but feels more future-proof
self._rc_cache[k] = copy.deepcopy(rcParams[k])
if DEBUG: print 'DEBUG RE-INIT\nold fontconfig:', self._fontconfig
self.__init__()
if DEBUG: print 'DEBUG fontconfig:', self._fontconfig
return self._fontconfig
def get_font_preamble(self):
"""
returns a string containing font configuration for the tex preamble
"""
return self._font_preamble
def get_custom_preamble(self):
"""returns a string containing user additions to the tex preamble"""
return '\n'.join(rcParams['text.latex.preamble'])
def _get_shell_cmd(self, *args):
"""
On windows, changing directories can be complicated by the presence of
multiple drives. get_shell_cmd deals with this issue.
"""
if sys.platform == 'win32':
command = ['%s'% os.path.splitdrive(self.texcache)[0]]
else:
command = []
command.extend(args)
return ' && '.join(command)
def make_tex(self, tex, fontsize):
"""
Generate a tex file to render the tex string at a specific font size
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
texfile = '%s.tex'%basefile
fh = file(texfile, 'w')
custom_preamble = self.get_custom_preamble()
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(self.font_family,
r'{\rmfamily %s}')
tex = fontcmd % tex
if rcParams['text.latex.unicode']:
unicode_preamble = """\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = r"""\documentclass{article}
%s
%s
%s
\usepackage[papersize={72in,72in}, body={70in,70in}, margin={1in,1in}]{geometry}
\pagestyle{empty}
\begin{document}
\fontsize{%f}{%f}%s
\end{document}
""" % (self._font_preamble, unicode_preamble, custom_preamble,
fontsize, fontsize*1.25, tex)
if rcParams['text.latex.unicode']:
fh.write(s.encode('utf8'))
else:
try:
fh.write(s)
except UnicodeEncodeError, err:
mpl.verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
fh.close()
return texfile
def make_dvi(self, tex, fontsize):
"""
generates a dvi file containing latex's layout of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
dvifile = '%s.dvi'% basefile
if DEBUG or not os.path.exists(dvifile):
texfile = self.make_tex(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"'% self.texcache,
'latex -interaction=nonstopmode %s > "%s"'\
%(os.path.split(texfile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
fh = file(outfile)
report = fh.read()
fh.close()
except IOError:
report = 'No latex error report available.'
if exit_status:
raise RuntimeError(('LaTeX was not able to process the following \
string:\n%s\nHere is the full report generated by LaTeX: \n\n'% repr(tex)) + report)
else: mpl.verbose.report(report, 'debug')
for fname in glob.glob(basefile+'*'):
if fname.endswith('dvi'): pass
elif fname.endswith('tex'): pass
else:
try: os.remove(fname)
except OSError: pass
return dvifile
def make_png(self, tex, fontsize, dpi):
"""
generates a png file containing latex's rendering of tex string
returns the filename
"""
basefile = self.get_basefile(tex, fontsize, dpi)
pngfile = '%s.png'% basefile
# see get_rgba for a discussion of the background
if DEBUG or not os.path.exists(pngfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"' % self.texcache,
'dvipng -bg Transparent -D %s -T tight -o \
"%s" "%s" > "%s"'%(dpi, os.path.split(pngfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
fh = file(outfile)
report = fh.read()
fh.close()
except IOError:
report = 'No dvipng error report available.'
if exit_status:
                raise RuntimeError('dvipng was not able to \
process the following file:\n%s\nHere is the full report generated by dvipng: \
\n\n'% dvifile + report)
else: mpl.verbose.report(report, 'debug')
try: os.remove(outfile)
except OSError: pass
return pngfile
def make_ps(self, tex, fontsize):
"""
generates a postscript file containing latex's rendering of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
psfile = '%s.epsf'% basefile
if DEBUG or not os.path.exists(psfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"'% self.texcache,
'dvips -q -E -o "%s" "%s" > "%s"'\
%(os.path.split(psfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status:
                raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
else: mpl.verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
return psfile
def get_ps_bbox(self, tex, fontsize):
"""
returns a list containing the postscript bounding box for latex's
rendering of the tex string
"""
psfile = self.make_ps(tex, fontsize)
ps = file(psfile)
for line in ps:
if line.startswith('%%BoundingBox:'):
return [int(val) for val in line.split()[1:]]
raise RuntimeError('Could not parse %s'%psfile)
def get_grey(self, tex, fontsize=None, dpi=None):
"""returns the alpha channel"""
key = tex, self.get_font_config(), fontsize, dpi
alpha = self.grey_arrayd.get(key)
if alpha is None:
pngfile = self.make_png(tex, fontsize, dpi)
X = read_png(os.path.join(self.texcache, pngfile))
if rcParams['text.dvipnghack'] is not None:
hack = rcParams['text.dvipnghack']
else:
hack = self._dvipng_hack_alpha
if hack:
# hack the alpha channel
# dvipng assumed a constant background, whereas we want to
# overlay these rasters with antialiasing over arbitrary
# backgrounds that may have other figure elements under them.
# When you set dvipng -bg Transparent, it actually makes the
# alpha channel 1 and does the background compositing and
# antialiasing itself and puts the blended data in the rgb
# channels. So what we do is extract the alpha information
# from the red channel, which is a blend of the default dvipng
# background (white) and foreground (black). So the amount of
# red (or green or blue for that matter since white and black
# blend to a grayscale) is the alpha intensity. Once we
# extract the correct alpha information, we assign it to the
# alpha channel properly and let the users pick their rgb. In
# this way, we can overlay tex strings on arbitrary
# backgrounds with antialiasing
#
# red = alpha*red_foreground + (1-alpha)*red_background
#
# Since the foreground is black (0) and the background is
# white (1) this reduces to red = 1-alpha or alpha = 1-red
#alpha = npy.sqrt(1-X[:,:,0]) # should this be sqrt here?
alpha = 1-X[:,:,0]
else:
alpha = X[:,:,-1]
self.grey_arrayd[key] = alpha
return alpha
def get_rgba(self, tex, fontsize=None, dpi=None, rgb=(0,0,0)):
"""
Returns latex's rendering of the tex string as an rgba array
"""
if not fontsize: fontsize = rcParams['font.size']
if not dpi: dpi = rcParams['savefig.dpi']
r,g,b = rgb
key = tex, self.get_font_config(), fontsize, dpi, tuple(rgb)
Z = self.rgba_arrayd.get(key)
if Z is None:
alpha = self.get_grey(tex, fontsize, dpi)
Z = np.zeros((alpha.shape[0], alpha.shape[1], 4), np.float)
Z[:,:,0] = r
Z[:,:,1] = g
Z[:,:,2] = b
Z[:,:,3] = alpha
self.rgba_arrayd[key] = Z
return Z
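# Illustrative usage sketch: with working latex + dvipng installations, a TeX
# string can be rasterized to an RGBA array roughly as follows (the fontsize,
# dpi and rgb values are arbitrary examples):
#
#   texmanager = TexManager()
#   Z = texmanager.get_rgba(r'$\alpha > \beta$', fontsize=12, dpi=80,
#                           rgb=(0, 0, 1))
#   print Z.shape    # (height, width, 4)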
| agpl-3.0 |
skrueger111/zazzie | src/scripts/cluster_ensemble.py | 2 | 4622 | # $Id: cluster_ensemble.py 3078 2016-04-06 19:46:43Z schowell $
import numpy
import time
import os
import sasmol.sasmol as sasmol
import sassie.calculate.convergence_test as convergence_test
try:
dummy = os.environ["DISPLAY"]
except:
# allows for creating plots without an xserver
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def com_clustering(pdb_file_name, dcd_file_names, output_prefix=None,
voxel_size=5.0, show=False, create=False):
'''
Clustering routine for identifying grouping structures within
an ensemble. Uses the center of mass of the entire structure to
differentiate one from another.
This may be improved by using the center of mass of a certain region.
'''
if output_prefix:
output_prefix += '_%dA' % int(voxel_size)
else:
output_prefix = '%s_cluster_%dA' % (pdb_file_name[:-4],
int(voxel_size))
mol = sasmol.SasMol(0)
mol.read_pdb(pdb_file_name)
com_coors = []
new_voxels = []
occupied_voxels = []
number_occupied_voxels = 0
voxel_set = set([])
tic = time.time()
if create:
subset_mol = sasmol.SasMol(0)
subset_mol.read_pdb(pdb_file_name)
output_dcd = subset_mol.open_dcd_write(output_prefix + '.dcd')
i = 0
for (i_dcd, dcd_file_name) in enumerate(dcd_file_names):
print 'processing dcd: %s\n' % dcd_file_name
input_dcd = mol.open_dcd_read(dcd_file_name)
number_of_frames = input_dcd[2]
for frame in xrange(number_of_frames):
mol.read_dcd_step(input_dcd, frame)
com_coors.append(mol.calccom(0))
mol.close_dcd_read(input_dcd[0])
this_dcd_new_voxels = convergence_test.count_new_spatial_voxels(
com_coors, voxel_set, voxel_size)
number_occupied_voxels += this_dcd_new_voxels
new_voxels.append(this_dcd_new_voxels)
occupied_voxels.append(number_occupied_voxels)
# select out the dcd_frames and save them to a new dcd file
if create:
voxel_list = list(voxel_set)
voxel_number = numpy.array(com_coors)/voxel_size
dcd_frames = []
for voxel in voxel_list:
v_diff = voxel_number-voxel
i_min = numpy.argmin(numpy.sqrt(v_diff[:,0]**2 + v_diff[:,1]**2
+ v_diff[:,2]**2))
dcd_frames.append(i_min)
dcd_frames_file = dcd_file_name.replace('.dcd', '_cluster_%dA.dat'
% voxel_size)
numpy.savetxt(dcd_frames_file, dcd_frames, fmt='%d')
input_dcd = subset_mol.open_dcd_read(dcd_file_name)
number_of_frames = input_dcd[2]
for frame in xrange(number_of_frames):
subset_mol.read_dcd_step(input_dcd, frame)
if frame in dcd_frames:
i += 1
subset_mol.write_dcd_step(output_dcd, 0, i)
subset_mol.close_dcd_read(input_dcd[0])
if create:
subset_mol.close_dcd_write(output_dcd)
toc = time.time() - tic
print "\ntime used: ", toc
# convergence_test.plot_convergence(new_voxels, dcd_file_names,
# occupied_voxels, output_prefix)
return number_occupied_voxels
def voxel_scan(pdb_file_name, dcd_file_names, voxel_range,
output_prefix=None, show=False, create=False):
if not output_prefix:
output_prefix = '%s_cluster' % pdb_file_name[:-4]
occupied_voxels = numpy.zeros((len(voxel_range), 2), dtype='int64')
for (i, voxel_size) in enumerate(voxel_range):
occupied_voxels[i, 0] = voxel_size
occupied_voxels[i, 1] = com_clustering(pdb_file_name, dcd_file_names,
output_prefix=output_prefix,
voxel_size=voxel_size,
show=show, create=create)
output_prefix = os.path.join(os.getcwd(), output_prefix)
out_name = output_prefix + '.dat'
numpy.savetxt(out_name, occupied_voxels, fmt='%d')
ax = plt.subplot(111)
plt.plot(occupied_voxels[:,0], occupied_voxels[:,1])
ax.set_yscale('log')
plt.xlabel(r'voxel size ($\AA$)')
plt.ylabel('occupied voxels')
plot_name = output_prefix
plt.savefig(plot_name + '.eps', dpi=400, bbox_inches='tight')
plt.savefig(plot_name + '.png', dpi=400, bbox_inches='tight')
if show:
plt.show()
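if __name__ == '__main__':
    # Minimal usage sketch; the pdb/dcd file names below are hypothetical
    # placeholders for an existing structure and its trajectory files.
    pdb_file_name = 'my_structure.pdb'
    dcd_file_names = ['run_00001.dcd', 'run_00002.dcd']
    # Scan voxel sizes from 2 to 10 Angstrom and record how many voxels the
    # ensemble's centers of mass occupy at each resolution.
    voxel_scan(pdb_file_name, dcd_file_names, range(2, 11, 2),
               output_prefix='my_structure_cluster', show=False, create=False)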
| gpl-3.0 |
gpospelov/BornAgain | Examples/fit55_SpecularIntro/FitSpecularBasics.py | 1 | 3752 | """
Example demonstrates how to fit specular data.
Our sample represents twenty alternating layers of Ti and Ni. We will fit
the thicknesses of all Ti layers, assuming them all to be equal.
Reference data was generated with GENX for Ti layer thicknesses equal to 3 nm.
"""
import numpy as np
import bornagain as ba
from bornagain import ba_fitmonitor
from matplotlib import pyplot as plt
from os import path
def get_sample(params):
"""
Creates a sample and returns it
:param params: a dictionary of optimization parameters
:return: the sample defined
"""
# substrate (Si)
si_sld_real = 2.0704e-06 # \AA^{-2}
density_si = 0.0499/ba.angstrom**3 # Si atomic number density
# layers' parameters
n_repetitions = 10
# Ni
ni_sld_real = 9.4245e-06 # \AA^{-2}
ni_thickness = 70*ba.angstrom
# Ti
ti_sld_real = -1.9493e-06 # \AA^{-2}
ti_thickness = params["ti_thickness"]
# defining materials
m_vacuum = ba.MaterialBySLD()
m_ni = ba.MaterialBySLD("Ni", ni_sld_real, 0.0)
m_ti = ba.MaterialBySLD("Ti", ti_sld_real, 0.0)
m_substrate = ba.MaterialBySLD("SiSubstrate", si_sld_real, 0.0)
# vacuum layer and substrate form multi layer
vacuum_layer = ba.Layer(m_vacuum)
ni_layer = ba.Layer(m_ni, ni_thickness)
ti_layer = ba.Layer(m_ti, ti_thickness)
substrate_layer = ba.Layer(m_substrate)
multi_layer = ba.MultiLayer()
multi_layer.addLayer(vacuum_layer)
for i in range(n_repetitions):
multi_layer.addLayer(ti_layer)
multi_layer.addLayer(ni_layer)
multi_layer.addLayer(substrate_layer)
return multi_layer
def get_real_data():
"""
Loading data from genx_interchanging_layers.dat
Returns a Nx2 array (N - the number of experimental data entries)
with first column being coordinates,
second one being values.
"""
if not hasattr(get_real_data, "data"):
filename = "genx_interchanging_layers.dat.gz"
filepath = path.join(path.dirname(path.realpath(__file__)), filename)
real_data = np.loadtxt(filepath, usecols=(0, 1), skiprows=3)
# translating axis values from double incident angle (degs)
# to incident angle (radians)
real_data[:, 0] *= np.pi/360
get_real_data.data = real_data
return get_real_data.data.copy()
def get_real_data_axis():
"""
Get axis coordinates of the experimental data
:return: 1D array with axis coordinates
"""
return get_real_data()[:, 0]
def get_real_data_values():
"""
Get experimental data values as a 1D array
:return: 1D array with experimental data values
"""
return get_real_data()[:, 1]
def get_simulation(params):
"""
Create and return specular simulation with its instrument defined
"""
wavelength = 1.54*ba.angstrom # beam wavelength
simulation = ba.SpecularSimulation()
scan = ba.AngularSpecScan(wavelength, get_real_data_axis())
simulation.setScan(scan)
simulation.setSample(get_sample(params))
return simulation
def run_fitting():
"""
Setup simulation and fit
"""
real_data = get_real_data_values()
fit_objective = ba.FitObjective()
fit_objective.addSimulationAndData(get_simulation, real_data, 1.0)
plot_observer = ba_fitmonitor.PlotterSpecular()
fit_objective.initPrint(10)
fit_objective.initPlot(10, plot_observer)
params = ba.Parameters()
params.add("ti_thickness",
50*ba.angstrom,
min=10*ba.angstrom,
max=60*ba.angstrom)
minimizer = ba.Minimizer()
result = minimizer.minimize(fit_objective.evaluate, params)
fit_objective.finalize(result)
if __name__ == '__main__':
run_fitting()
plt.show()
| gpl-3.0 |
meteorcloudy/tensorflow | tensorflow/python/estimator/inputs/pandas_io_test.py | 7 | 8390 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def callInputFnOnce(self, input_fn, session):
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_NonBoolShuffle(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaisesRegexp(ValueError,
'shuffle must be provided and explicitly '
'set as boolean'):
# Default shuffle is None
pandas_io.pandas_input_fn(x, y_noindex)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.test_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
test.main()
| apache-2.0 |
tawsifkhan/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 176 | 12155 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
SuLab/fiSSEA | pandasVCF/src/multi_sample/Vcf_metadata.py | 1 | 1096 | import os,sys,gzip
import pandas as pd
class Vcf_metadata(object):
'''
This class parses a VCF header into a pandas
dataframe object. It recognizes gzip and uncompressed
file formats. This function assumes the header does not
extent past 5000 lines
'''
def __init__(self, filename):
if filename.endswith('.gz'):
self.compression = 'gzip'
if filename+'.tbi' in os.listdir(os.path.split(filename)[0]):
header_lines = os.popen('tabix -H ' + filename).readlines()
self.header = [l.replace('#CHROM','CHROM') for l in header_lines if l.startswith('#')]
            else:
                # build the tabix index first, then read the header
                os.system('tabix -p vcf ' + filename)
                header_lines = os.popen('tabix -H ' + filename).readlines()
                self.header = [l for l in header_lines if l.startswith('#')]
else:
self.compression = ''
header_lines = os.popen('head -5000 ' + filename).readlines()
self.header = [l for l in header_lines if l.startswith('#')]
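if __name__ == '__main__':
    # Minimal usage sketch; 'sample.vcf.gz' is a hypothetical bgzipped,
    # tabix-indexable VCF path.
    meta = Vcf_metadata('sample.vcf.gz')
    print(meta.compression)           # 'gzip'
    print(''.join(meta.header[:5]))   # first few header lines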
| apache-2.0 |
lxneng/incubator-airflow | airflow/contrib/hooks/pinot_hook.py | 21 | 3483 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import six
from pinotdb import connect
from airflow.hooks.dbapi_hook import DbApiHook
class PinotDbApiHook(DbApiHook):
"""
Connect to pinot db(https://github.com/linkedin/pinot) to issue pql
"""
conn_name_attr = 'pinot_broker_conn_id'
default_conn_name = 'pinot_broker_default'
supports_autocommit = False
def __init__(self, *args, **kwargs):
super(PinotDbApiHook, self).__init__(*args, **kwargs)
def get_conn(self):
"""
        Establish a connection to the pinot broker through the pinot dbapi.
"""
conn = self.get_connection(self.pinot_broker_conn_id)
pinot_broker_conn = connect(
host=conn.host,
port=conn.port,
path=conn.extra_dejson.get('endpoint', '/pql'),
scheme=conn.extra_dejson.get('schema', 'http')
)
self.log.info('Get the connection to pinot '
'broker on {host}'.format(host=conn.host))
return pinot_broker_conn
def get_uri(self):
"""
Get the connection uri for pinot broker.
e.g: http://localhost:9000/pql
"""
conn = self.get_connection(getattr(self, self.conn_name_attr))
host = conn.host
if conn.port is not None:
host += ':{port}'.format(port=conn.port)
conn_type = 'http' if not conn.conn_type else conn.conn_type
endpoint = conn.extra_dejson.get('endpoint', 'pql')
return '{conn_type}://{host}/{endpoint}'.format(
conn_type=conn_type, host=host, endpoint=endpoint)
def get_records(self, sql):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str
"""
if six.PY2:
sql = sql.encode('utf-8')
with self.get_conn() as cur:
cur.execute(sql)
return cur.fetchall()
def get_first(self, sql):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
"""
if six.PY2:
sql = sql.encode('utf-8')
with self.get_conn() as cur:
cur.execute(sql)
return cur.fetchone()
def set_autocommit(self, conn, autocommit):
raise NotImplementedError()
def get_pandas_df(self, sql, parameters=None):
raise NotImplementedError()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
raise NotImplementedError()
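# Usage sketch: assumes an Airflow connection named 'pinot_broker_default'
# pointing at a Pinot broker, and a hypothetical table 'baseballStats':
#
#   hook = PinotDbApiHook()
#   rows = hook.get_records('SELECT playerName FROM baseballStats LIMIT 5')
#   first = hook.get_first('SELECT COUNT(*) FROM baseballStats')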
| apache-2.0 |
phdowling/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
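# Usage sketch: a typical import from this package, e.g.
#
#   from sklearn.linear_model import LinearRegression
#   model = LinearRegression().fit([[0.], [1.], [2.]], [0., 1., 2.])
#   print(model.predict([[3.]]))    # approximately [ 3.]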
| bsd-3-clause |
sinhrks/scikit-learn | sklearn/gaussian_process/tests/test_gpc.py | 28 | 6061 | """Testing for Gaussian process classification """
# Author: Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils.testing import (assert_true, assert_greater, assert_equal,
assert_almost_equal, assert_array_equal)
def f(x):
return np.sin(x)
X = np.atleast_2d(np.linspace(0, 10, 30)).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = np.array(f(X).ravel() > 0, dtype=int)
fX = f(X).ravel()
y_mc = np.empty(y.shape, dtype=int) # multi-class
y_mc[fX < -0.35] = 0
y_mc[(fX >= -0.35) & (fX < 0.35)] = 1
y_mc[fX > 0.35] = 2
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=0.1), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2))
* RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))]
def test_predict_consistent():
""" Check binary predict decision has also predicted probability above 0.5.
"""
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_array_equal(gpc.predict(X),
gpc.predict_proba(X)[:, 1] >= 0.5)
def test_lml_improving():
""" Test that hyperparameter-tuning improves log-marginal likelihood. """
for kernel in kernels:
if kernel == fixed_kernel: continue
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(kernel.theta))
def test_lml_precomputed():
""" Test that lml of optimized kernel is stored correctly. """
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_almost_equal(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(), 7)
def test_converged_to_local_maximum():
""" Test that we are in local maximum after hyperparameter-optimization."""
for kernel in kernels:
if kernel == fixed_kernel: continue
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpc.log_marginal_likelihood(gpc.kernel_.theta, True)
assert_true(np.all((np.abs(lml_gradient) < 1e-4)
| (gpc.kernel_.theta == gpc.kernel_.bounds[:, 0])
| (gpc.kernel_.theta == gpc.kernel_.bounds[:, 1])))
def test_lml_gradient():
""" Compare analytic and numeric gradient of log marginal likelihood. """
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpc.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_random_starts():
"""
Test that an increasing number of random-starts of GP fitting only
increases the log marginal likelihood of the chosen theta.
"""
n_samples, n_features = 25, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1e-3] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features)
last_lml = -np.inf
for n_restarts_optimizer in range(9):
gp = GaussianProcessClassifier(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert_greater(lml, last_lml - np.finfo(np.float32).eps)
last_lml = lml
def test_custom_optimizer():
""" Test that GPC can use externally defined optimizers. """
# Define a dummy optimizer that simply tests 1000 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(1000):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
for kernel in kernels:
if kernel == fixed_kernel: continue
gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer)
gpc.fit(X, y_mc)
# Checks that optimizer improved marginal likelihood
assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(kernel.theta))
def test_multi_class():
""" Test GPC for multi-class classification problems. """
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
assert_almost_equal(y_prob.sum(1), 1)
y_pred = gpc.predict(X2)
assert_array_equal(np.argmax(y_prob, 1), y_pred)
def test_multi_class_n_jobs():
""" Test that multi-class GPC produces identical results with n_jobs>1. """
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2)
gpc_2.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
y_prob_2 = gpc_2.predict_proba(X2)
assert_almost_equal(y_prob, y_prob_2)
| bsd-3-clause |