repo_name (string, lengths 6-112) | path (string, lengths 4-204) | copies (string, lengths 1-3) | size (string, lengths 4-6) | content (string, lengths 714-810k) | license (string, 15 classes)
---|---|---|---|---|---
herilalaina/scikit-learn | benchmarks/bench_lof.py | 34 | 3492 |
"""
============================
LocalOutlierFactor benchmark
============================
A test of LocalOutlierFactor on classical anomaly detection datasets.
Note that LocalOutlierFactor is not meant to predict on a test set and its
performance is assessed in an outlier detection context:
1. The model is trained on the whole dataset which is assumed to contain
outliers.
2. The ROC curve is computed on the same dataset using the knowledge of the
labels.
In this context there is no need to shuffle the dataset because the model
is trained and tested on the whole dataset. The randomness of this benchmark
is only caused by the random selection of anomalies in the SA dataset.
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import LocalOutlierFactor
from sklearn.metrics import roc_curve, auc
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_mldata
from sklearn.preprocessing import LabelBinarizer
print(__doc__)
random_state = 2 # to control the random selection of anomalies in SA
# datasets available: ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
datasets = ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
plt.figure()
for dataset_name in datasets:
# loading and vectorization
print('loading data')
if dataset_name in ['http', 'smtp', 'SA', 'SF']:
dataset = fetch_kddcup99(subset=dataset_name, percent10=True,
random_state=random_state)
X = dataset.data
y = dataset.target
if dataset_name == 'shuttle':
dataset = fetch_mldata('shuttle')
X = dataset.data
y = dataset.target
# we remove data with label 4
# normal data are then those of class 1
s = (y != 4)
X = X[s, :]
y = y[s]
y = (y != 1).astype(int)
if dataset_name == 'forestcover':
dataset = fetch_covtype()
X = dataset.data
y = dataset.target
# normal data are those with attribute 2
# abnormal those with attribute 4
s = (y == 2) + (y == 4)
X = X[s, :]
y = y[s]
y = (y != 2).astype(int)
print('vectorizing data')
if dataset_name == 'SF':
lb = LabelBinarizer()
x1 = lb.fit_transform(X[:, 1].astype(str))
X = np.c_[X[:, :1], x1, X[:, 2:]]
y = (y != b'normal.').astype(int)
if dataset_name == 'SA':
lb = LabelBinarizer()
x1 = lb.fit_transform(X[:, 1].astype(str))
x2 = lb.fit_transform(X[:, 2].astype(str))
x3 = lb.fit_transform(X[:, 3].astype(str))
X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
y = (y != b'normal.').astype(int)
if dataset_name == 'http' or dataset_name == 'smtp':
y = (y != b'normal.').astype(int)
X = X.astype(float)
print('LocalOutlierFactor processing...')
model = LocalOutlierFactor(n_neighbors=20)
tstart = time()
model.fit(X)
fit_time = time() - tstart
scoring = -model.negative_outlier_factor_ # the lower, the more normal
fpr, tpr, thresholds = roc_curve(y, scoring)
AUC = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1,
label=('ROC for %s (area = %0.3f, train-time: %0.2fs)'
% (dataset_name, AUC, fit_time)))
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
nelango/ViralityAnalysis | model/lib/sklearn/ensemble/tests/test_forest.py | 6 | 39405 |
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils import warnings
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion,
random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(X, y, name, criterion):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, est.transform, X, threshold="mean")
assert_greater(X_new.shape[1], 0)
assert_less(X_new.shape[1], X.shape[1])
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
importances_parallel = est.feature_importances_
assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
@skip_if_32bit
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, X, y, name, criterion
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse"]):
yield check_importances, X, y, name, criterion
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single-variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The most compact of them,
# (0,1/0,0/--0,2/--), has probability 1/3 while the 4 others each have
# probability 1/6.
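# (Sketch of the counting argument, assuming extremely randomized splits draw
# the threshold uniformly between the min and max of X_0 in the node: the root
# cut falls in (0, 1), (1, 2) or (2, 3) with probability 1/3 each. A cut in
# (1, 2) separates {0, 1} from {2, 3} and both children are then split
# deterministically, giving the single compact shape with probability 1/3. A
# cut in (0, 1) or (2, 3) leaves a 3-value child whose next cut is itself a
# 50/50 choice, giving the remaining 4 shapes probability 1/3 * 1/2 = 1/6.)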
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
# Test class_weight works for multi-output problems.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
| mit |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/traitlets/config/loader.py | 3 | 28215 |
# encoding: utf-8
"""A simple configuration system."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import argparse
import copy
import logging
import os
import re
import sys
import json
from ast import literal_eval
from ipython_genutils.path import filefind
from ipython_genutils import py3compat
from ipython_genutils.encoding import DEFAULT_ENCODING
from ipython_genutils.py3compat import unicode_type, iteritems
from traitlets.traitlets import HasTraits, List, Any
#-----------------------------------------------------------------------------
# Exceptions
#-----------------------------------------------------------------------------
class ConfigError(Exception):
pass
class ConfigLoaderError(ConfigError):
pass
class ConfigFileNotFound(ConfigError):
pass
class ArgumentError(ConfigLoaderError):
pass
#-----------------------------------------------------------------------------
# Argparse fix
#-----------------------------------------------------------------------------
# Unfortunately argparse by default prints help messages to stderr instead of
# stdout. This makes it annoying to capture long help screens at the command
# line, since one must know how to pipe stderr, which many users don't know how
# to do. So we override the print_help method with one that defaults to
# stdout and use our class instead.
class ArgumentParser(argparse.ArgumentParser):
"""Simple argparse subclass that prints help to stdout by default."""
def print_help(self, file=None):
if file is None:
file = sys.stdout
return super(ArgumentParser, self).print_help(file)
print_help.__doc__ = argparse.ArgumentParser.print_help.__doc__
#-----------------------------------------------------------------------------
# Config class for holding config information
#-----------------------------------------------------------------------------
class LazyConfigValue(HasTraits):
"""Proxy object for exposing methods on configurable containers
Exposes:
- append, extend, insert on lists
- update on dicts
- update, add on sets
"""
_value = None
# list methods
_extend = List()
_prepend = List()
def append(self, obj):
self._extend.append(obj)
def extend(self, other):
self._extend.extend(other)
def prepend(self, other):
"""like list.extend, but for the front"""
self._prepend[:0] = other
_inserts = List()
def insert(self, index, other):
if not isinstance(index, int):
raise TypeError("An integer is required")
self._inserts.append((index, other))
# dict methods
# update is used for both dict and set
_update = Any()
def update(self, other):
if self._update is None:
if isinstance(other, dict):
self._update = {}
else:
self._update = set()
self._update.update(other)
# set methods
def add(self, obj):
self.update({obj})
def get_value(self, initial):
"""construct the value from the initial one
after applying any insert / extend / update changes
"""
if self._value is not None:
return self._value
value = copy.deepcopy(initial)
if isinstance(value, list):
for idx, obj in self._inserts:
value.insert(idx, obj)
value[:0] = self._prepend
value.extend(self._extend)
elif isinstance(value, dict):
if self._update:
value.update(self._update)
elif isinstance(value, set):
if self._update:
value.update(self._update)
self._value = value
return value
def to_dict(self):
"""return JSONable dict form of my data
Currently update as dict or set, extend, prepend as lists, and inserts as list of tuples.
"""
d = {}
if self._update:
d['update'] = self._update
if self._extend:
d['extend'] = self._extend
if self._prepend:
d['prepend'] = self._prepend
elif self._inserts:
d['inserts'] = self._inserts
return d
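# A minimal usage sketch for LazyConfigValue, assuming a plain list as the
# eventual initial value; edits are recorded lazily and only applied when
# get_value() is called with the real initial container:
#
#     lazy = LazyConfigValue()
#     lazy.extend([2, 3])
#     lazy.prepend([0])
#     lazy.get_value([1])   # -> [0, 1, 2, 3]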
def _is_section_key(key):
"""Is a Config key a section name (does it start with a capital)?"""
if key and key[0].upper()==key[0] and not key.startswith('_'):
return True
else:
return False
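# For example, _is_section_key('InteractiveShell') is True, while
# _is_section_key('autocall') and _is_section_key('_private') are both False.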
class Config(dict):
"""An attribute based dict that can do smart merges."""
def __init__(self, *args, **kwds):
dict.__init__(self, *args, **kwds)
self._ensure_subconfig()
def _ensure_subconfig(self):
"""ensure that sub-dicts that should be Config objects are
casts dicts that are under section keys to Config objects,
which is necessary for constructing Config objects from dict literals.
"""
for key in self:
obj = self[key]
if _is_section_key(key) \
and isinstance(obj, dict) \
and not isinstance(obj, Config):
setattr(self, key, Config(obj))
def _merge(self, other):
"""deprecated alias, use Config.merge()"""
self.merge(other)
def merge(self, other):
"""merge another config object into this one"""
to_update = {}
for k, v in iteritems(other):
if k not in self:
to_update[k] = copy.deepcopy(v)
else: # I have this key
if isinstance(v, Config) and isinstance(self[k], Config):
# Recursively merge common sub Configs
self[k].merge(v)
else:
# Plain updates for non-Configs
to_update[k] = copy.deepcopy(v)
self.update(to_update)
def collisions(self, other):
"""Check for collisions between two config objects.
Returns a dict of the form {"Class": {"trait": "collision message"}},
indicating which values have been ignored.
An empty dict indicates no collisions.
"""
collisions = {}
for section in self:
if section not in other:
continue
mine = self[section]
theirs = other[section]
for key in mine:
if key in theirs and mine[key] != theirs[key]:
collisions.setdefault(section, {})
collisions[section][key] = "%r ignored, using %r" % (mine[key], theirs[key])
return collisions
def __contains__(self, key):
# allow nested contains of the form `"Section.key" in config`
if '.' in key:
first, remainder = key.split('.', 1)
if first not in self:
return False
return remainder in self[first]
return super(Config, self).__contains__(key)
# .has_key is deprecated for dictionaries.
has_key = __contains__
def _has_section(self, key):
return _is_section_key(key) and key in self
def copy(self):
return type(self)(dict.copy(self))
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo):
new_config = type(self)()
for key, value in self.items():
if isinstance(value, (Config, LazyConfigValue)):
# deep copy config objects
value = copy.deepcopy(value, memo)
elif type(value) in {dict, list, set, tuple}:
# shallow copy plain container traits
value = copy.copy(value)
new_config[key] = value
return new_config
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
if _is_section_key(key):
c = Config()
dict.__setitem__(self, key, c)
return c
elif not key.startswith('_'):
# undefined, create lazy value, used for container methods
v = LazyConfigValue()
dict.__setitem__(self, key, v)
return v
else:
raise KeyError
def __setitem__(self, key, value):
if _is_section_key(key):
if not isinstance(value, Config):
raise ValueError('values whose keys begin with an uppercase '
'char must be Config instances: %r, %r' % (key, value))
dict.__setitem__(self, key, value)
def __getattr__(self, key):
if key.startswith('__'):
return dict.__getattr__(self, key)
try:
return self.__getitem__(key)
except KeyError as e:
raise AttributeError(e)
def __setattr__(self, key, value):
if key.startswith('__'):
return dict.__setattr__(self, key, value)
try:
self.__setitem__(key, value)
except KeyError as e:
raise AttributeError(e)
def __delattr__(self, key):
if key.startswith('__'):
return dict.__delattr__(self, key)
try:
dict.__delitem__(self, key)
except KeyError as e:
raise AttributeError(e)
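# A minimal usage sketch for Config, relying only on the behaviour defined
# above (trait and section names are hypothetical):
#
#     c = Config()
#     c.InteractiveShell.autocall = 2      # sections are created on demand
#     'InteractiveShell.autocall' in c     # -> True (nested contains)
#     c.merge(Config({'InteractiveShell': {'colors': 'Linux'}}))
#     c.InteractiveShell.colors            # -> 'Linux'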
#-----------------------------------------------------------------------------
# Config loading classes
#-----------------------------------------------------------------------------
class ConfigLoader(object):
"""A object for loading configurations from just about anywhere.
The resulting configuration is packaged as a :class:`Config`.
Notes
-----
A :class:`ConfigLoader` does one thing: load a config from a source
(file, command line arguments) and return the data as a :class:`Config` object.
There are lots of things that :class:`ConfigLoader` does not do. It does
not implement complex logic for finding config files. It does not handle
default values or merge multiple configs. These things need to be
handled elsewhere.
"""
def _log_default(self):
from traitlets.log import get_logger
return get_logger()
def __init__(self, log=None):
"""A base class for config loaders.
log : instance of :class:`logging.Logger` to use.
By default, the logger of :meth:`traitlets.config.application.Application.instance()`
will be used
Examples
--------
>>> cl = ConfigLoader()
>>> config = cl.load_config()
>>> config
{}
"""
self.clear()
if log is None:
self.log = self._log_default()
self.log.debug('Using default logger')
else:
self.log = log
def clear(self):
self.config = Config()
def load_config(self):
"""Load a config from somewhere, return a :class:`Config` instance.
Usually, this will cause self.config to be set and then returned.
However, in most cases, :meth:`ConfigLoader.clear` should be called
to erase any previous state.
"""
self.clear()
return self.config
class FileConfigLoader(ConfigLoader):
"""A base class for file based configurations.
As we add more file based config loaders, the common logic should go
here.
"""
def __init__(self, filename, path=None, **kw):
"""Build a config loader for a filename and path.
Parameters
----------
filename : str
The file name of the config file.
path : str, list, tuple
The path to search for the config file on, or a sequence of
paths to try in order.
"""
super(FileConfigLoader, self).__init__(**kw)
self.filename = filename
self.path = path
self.full_filename = ''
def _find_file(self):
"""Try to find the file by searching the paths."""
self.full_filename = filefind(self.filename, self.path)
class JSONFileConfigLoader(FileConfigLoader):
"""A JSON file loader for config"""
def load_config(self):
"""Load the config from a file and return it as a Config object."""
self.clear()
try:
self._find_file()
except IOError as e:
raise ConfigFileNotFound(str(e))
dct = self._read_file_as_dict()
self.config = self._convert_to_config(dct)
return self.config
def _read_file_as_dict(self):
with open(self.full_filename) as f:
return json.load(f)
def _convert_to_config(self, dictionary):
if 'version' in dictionary:
version = dictionary.pop('version')
else:
version = 1
self.log.warning("Unrecognized JSON config file version, assuming version {}".format(version))
if version == 1:
return Config(dictionary)
else:
raise ValueError('Unknown version of JSON config file: {version}'.format(version=version))
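# A minimal usage sketch for JSONFileConfigLoader, assuming a hypothetical
# file 'example_config.json' containing:
#
#     {"version": 1, "MyClass": {"my_trait": 42}}
#
# it could be loaded with:
#
#     loader = JSONFileConfigLoader('example_config.json', path='.')
#     config = loader.load_config()
#     config.MyClass.my_trait   # -> 42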
class PyFileConfigLoader(FileConfigLoader):
"""A config loader for pure python files.
This is responsible for locating a Python config file by filename and
path, then executing it to construct a Config object.
"""
def load_config(self):
"""Load the config from a file and return it as a Config object."""
self.clear()
try:
self._find_file()
except IOError as e:
raise ConfigFileNotFound(str(e))
self._read_file_as_dict()
return self.config
def load_subconfig(self, fname, path=None):
"""Injected into config file namespace as load_subconfig"""
if path is None:
path = self.path
loader = self.__class__(fname, path)
try:
sub_config = loader.load_config()
except ConfigFileNotFound:
# Pass silently if the sub config is not there,
# treat it as an empty config file.
pass
else:
self.config.merge(sub_config)
def _read_file_as_dict(self):
"""Load the config file into self.config, with recursive loading."""
def get_config():
"""Unnecessary now, but a deprecation warning is more trouble than it's worth."""
return self.config
namespace = dict(
c=self.config,
load_subconfig=self.load_subconfig,
get_config=get_config,
__file__=self.full_filename,
)
fs_encoding = sys.getfilesystemencoding() or 'ascii'
conf_filename = self.full_filename.encode(fs_encoding)
py3compat.execfile(conf_filename, namespace)
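# A minimal usage sketch for PyFileConfigLoader, assuming a hypothetical file
# 'example_config.py' containing ordinary traitlets-style assignments such as:
#
#     c.MyClass.my_trait = 42
#
# it could be loaded with:
#
#     loader = PyFileConfigLoader('example_config.py', path='.')
#     config = loader.load_config()
#     config.MyClass.my_trait   # -> 42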
class CommandLineConfigLoader(ConfigLoader):
"""A config loader for command line arguments.
As we add more command line based loaders, the common logic should go
here.
"""
def _exec_config_str(self, lhs, rhs):
"""execute self.config.<lhs> = <rhs>
* expands ~ with expanduser
* tries to assign with literal_eval, otherwise assigns with just the string,
allowing `--C.a=foobar` and `--C.a="foobar"` to be equivalent. *Not*
equivalent are `--C.a=4` and `--C.a='4'`.
"""
rhs = os.path.expanduser(rhs)
try:
# Try to see if regular Python syntax will work. This
# won't handle strings as the quote marks are removed
# by the system shell.
value = literal_eval(rhs)
except (NameError, SyntaxError, ValueError):
# This case happens if the rhs is a string.
value = rhs
exec(u'self.config.%s = value' % lhs)
def _load_flag(self, cfg):
"""update self.config from a flag, which can be a dict or Config"""
if isinstance(cfg, (dict, Config)):
# don't clobber whole config sections, update
# each section from config:
for sec,c in iteritems(cfg):
self.config[sec].update(c)
else:
raise TypeError("Invalid flag: %r" % cfg)
# raw --identifier=value pattern
# but *also* accept '-' as wordsep, for aliases
# accepts: --foo=a
# --Class.trait=value
# --alias-name=value
# rejects: -foo=value
# --foo
# --Class.trait
kv_pattern = re.compile(r'\-\-[A-Za-z][\w\-]*(\.[\w\-]+)*\=.*')
# just flags, no assignments, with two *or one* leading '-'
# accepts: --foo
# -foo-bar-again
# rejects: --anything=anything
# --two.word
flag_pattern = re.compile(r'\-\-?\w+[\-\w]*$')
class KeyValueConfigLoader(CommandLineConfigLoader):
"""A config loader that loads key value pairs from the command line.
This allows command line options to be given in the following form::
ipython --profile="foo" --InteractiveShell.autocall=False
"""
def __init__(self, argv=None, aliases=None, flags=None, **kw):
"""Create a key value pair config loader.
Parameters
----------
argv : list
A list that has the form of sys.argv[1:] which has unicode
elements of the form u"key=value". If this is None (default),
then sys.argv[1:] will be used.
aliases : dict
A dict of aliases for configurable traits.
Keys are the short aliases, Values are the resolved trait.
Of the form: `{'alias' : 'Configurable.trait'}`
flags : dict
A dict of flags, keyed by str name. Values can be Config objects,
dicts, or "key=value" strings. If Config or dict, when the flag
is triggered, the flag is loaded as `self.config.update(m)`.
Returns
-------
config : Config
The resulting Config object.
Examples
--------
>>> from traitlets.config.loader import KeyValueConfigLoader
>>> cl = KeyValueConfigLoader()
>>> d = cl.load_config(["--A.name='brian'","--B.number=0"])
>>> sorted(d.items())
[('A', {'name': 'brian'}), ('B', {'number': 0})]
"""
super(KeyValueConfigLoader, self).__init__(**kw)
if argv is None:
argv = sys.argv[1:]
self.argv = argv
self.aliases = aliases or {}
self.flags = flags or {}
def clear(self):
super(KeyValueConfigLoader, self).clear()
self.extra_args = []
def _decode_argv(self, argv, enc=None):
"""decode argv if bytes, using stdin.encoding, falling back on default enc"""
uargv = []
if enc is None:
enc = DEFAULT_ENCODING
for arg in argv:
if not isinstance(arg, unicode_type):
# only decode if not already decoded
arg = arg.decode(enc)
uargv.append(arg)
return uargv
def load_config(self, argv=None, aliases=None, flags=None):
"""Parse the configuration and generate the Config object.
After loading, any arguments that are not key-value or
flags will be stored in self.extra_args - a list of
unparsed command-line arguments. This is used for
arguments such as input files or subcommands.
Parameters
----------
argv : list, optional
A list that has the form of sys.argv[1:] which has unicode
elements of the form u"key=value". If this is None (default),
then self.argv will be used.
aliases : dict
A dict of aliases for configurable traits.
Keys are the short aliases, Values are the resolved trait.
Of the form: `{'alias' : 'Configurable.trait'}`
flags : dict
A dict of flags, keyed by str name. Values can be Config objects
or dicts. When the flag is triggered, the config is loaded as
`self.config.update(cfg)`.
"""
self.clear()
if argv is None:
argv = self.argv
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
# ensure argv is a list of unicode strings:
uargv = self._decode_argv(argv)
for idx,raw in enumerate(uargv):
# strip leading '-'
item = raw.lstrip('-')
if raw == '--':
# don't parse arguments after '--'
# this is useful for relaying arguments to scripts, e.g.
# ipython -i foo.py --matplotlib=qt -- args after '--' go-to-foo.py
self.extra_args.extend(uargv[idx+1:])
break
if kv_pattern.match(raw):
lhs,rhs = item.split('=',1)
# Substitute longnames for aliases.
if lhs in aliases:
lhs = aliases[lhs]
if '.' not in lhs:
# probably a mistyped alias, but not technically illegal
self.log.warning("Unrecognized alias: '%s', it will probably have no effect.", raw)
try:
self._exec_config_str(lhs, rhs)
except Exception:
raise ArgumentError("Invalid argument: '%s'" % raw)
elif flag_pattern.match(raw):
if item in flags:
cfg,help = flags[item]
self._load_flag(cfg)
else:
raise ArgumentError("Unrecognized flag: '%s'"%raw)
elif raw.startswith('-'):
kv = '--'+item
if kv_pattern.match(kv):
raise ArgumentError("Invalid argument: '%s', did you mean '%s'?"%(raw, kv))
else:
raise ArgumentError("Invalid argument: '%s'"%raw)
else:
# keep all args that aren't valid in a list,
# in case our parent knows what to do with them.
self.extra_args.append(item)
return self.config
class ArgParseConfigLoader(CommandLineConfigLoader):
"""A loader that uses the argparse module to load from the command line."""
def __init__(self, argv=None, aliases=None, flags=None, log=None, *parser_args, **parser_kw):
"""Create a config loader for use with argparse.
Parameters
----------
argv : optional, list
If given, used to read command-line arguments from, otherwise
sys.argv[1:] is used.
parser_args : tuple
A tuple of positional arguments that will be passed to the
constructor of :class:`argparse.ArgumentParser`.
parser_kw : dict
A dict of keyword arguments that will be passed to the
constructor of :class:`argparse.ArgumentParser`.
Returns
-------
config : Config
The resulting Config object.
"""
super(CommandLineConfigLoader, self).__init__(log=log)
self.clear()
if argv is None:
argv = sys.argv[1:]
self.argv = argv
self.aliases = aliases or {}
self.flags = flags or {}
self.parser_args = parser_args
self.version = parser_kw.pop("version", None)
kwargs = dict(argument_default=argparse.SUPPRESS)
kwargs.update(parser_kw)
self.parser_kw = kwargs
def load_config(self, argv=None, aliases=None, flags=None):
"""Parse command line arguments and return as a Config object.
Parameters
----------
args : optional, list
If given, a list with the structure of sys.argv[1:] to parse
arguments from. If not given, the instance's self.argv attribute
(given at construction time) is used."""
self.clear()
if argv is None:
argv = self.argv
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
self._create_parser(aliases, flags)
self._parse_args(argv)
self._convert_to_config()
return self.config
def get_extra_args(self):
if hasattr(self, 'extra_args'):
return self.extra_args
else:
return []
def _create_parser(self, aliases=None, flags=None):
self.parser = ArgumentParser(*self.parser_args, **self.parser_kw)
self._add_arguments(aliases, flags)
def _add_arguments(self, aliases=None, flags=None):
raise NotImplementedError("subclasses must implement _add_arguments")
def _parse_args(self, args):
"""self.parser->self.parsed_data"""
# decode sys.argv to support unicode command-line options
enc = DEFAULT_ENCODING
uargs = [py3compat.cast_unicode(a, enc) for a in args]
self.parsed_data, self.extra_args = self.parser.parse_known_args(uargs)
def _convert_to_config(self):
"""self.parsed_data->self.config"""
for k, v in iteritems(vars(self.parsed_data)):
exec("self.config.%s = v"%k, locals(), globals())
class KVArgParseConfigLoader(ArgParseConfigLoader):
"""A config loader that loads aliases and flags with argparse,
but will use KVLoader for the rest. This allows better parsing
of common args, such as `ipython -c 'print 5'`, but still gets
arbitrary config with `ipython --InteractiveShell.use_readline=False`"""
def _add_arguments(self, aliases=None, flags=None):
self.alias_flags = {}
# print aliases, flags
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
paa = self.parser.add_argument
for key,value in iteritems(aliases):
if key in flags:
# flags
nargs = '?'
else:
nargs = None
            if len(key) == 1:
paa('-'+key, '--'+key, type=unicode_type, dest=value, nargs=nargs)
else:
paa('--'+key, type=unicode_type, dest=value, nargs=nargs)
for key, (value, help) in iteritems(flags):
if key in self.aliases:
                # the flag shares a name with an alias: record the mapping so
                # _convert_to_config can apply the flag when the alias is
                # passed without a value
self.alias_flags[self.aliases[key]] = value
continue
            if len(key) == 1:
paa('-'+key, '--'+key, action='append_const', dest='_flags', const=value)
else:
paa('--'+key, action='append_const', dest='_flags', const=value)
def _convert_to_config(self):
"""self.parsed_data->self.config, parse unrecognized extra args via KVLoader."""
# remove subconfigs list from namespace before transforming the Namespace
if '_flags' in self.parsed_data:
subcs = self.parsed_data._flags
del self.parsed_data._flags
else:
subcs = []
for k, v in iteritems(vars(self.parsed_data)):
if v is None:
# it was a flag that shares the name of an alias
subcs.append(self.alias_flags[k])
else:
# eval the KV assignment
self._exec_config_str(k, v)
for subc in subcs:
self._load_flag(subc)
if self.extra_args:
sub_parser = KeyValueConfigLoader(log=self.log)
sub_parser.load_config(self.extra_args)
self.config.merge(sub_parser.config)
self.extra_args = sub_parser.extra_args
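# Illustrative usage (not part of the original module), following the usual
# IPython conventions of aliases={name: 'Class.trait'} and
# flags={name: (config_dict, help)}; 'Application.log_level' and 'Foo.bar'
# are hypothetical trait names used only for this sketch.
#
#     aliases = {'log-level': 'Application.log_level'}
#     flags = {'debug': ({'Application': {'log_level': 10}}, "set log level to DEBUG")}
#     loader = KVArgParseConfigLoader(aliases=aliases, flags=flags)
#     cfg = loader.load_config(['--log-level', '30', '--Foo.bar=1'])
#
# The recognized alias is parsed by argparse, while the leftover '--Foo.bar=1'
# is handed to KeyValueConfigLoader and merged into the same Config, so both
# Application.log_level and Foo.bar end up set on the returned object.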
def load_pyconfig_files(config_files, path):
"""Load multiple Python config files, merging each of them in turn.
Parameters
==========
config_files : list of str
List of config files names to load and merge into the config.
path : unicode
The full path to the location of the config files.
"""
config = Config()
for cf in config_files:
loader = PyFileConfigLoader(cf, path=path)
try:
next_config = loader.load_config()
except ConfigFileNotFound:
pass
except:
raise
else:
config.merge(next_config)
return config
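# Illustrative usage (not part of the original file); the file names and the
# profile path below are hypothetical.
#
#     cfg = load_pyconfig_files(['ipython_config.py', 'ipython_notebook_config.py'],
#                               '/path/to/profile_default')
#
# Missing files are skipped (ConfigFileNotFound is caught and ignored), and each
# successfully loaded file is merged on top of the previous result, so later
# files in the list take precedence for conflicting values.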
|
artistic-2.0
|
lancezlin/ml_on_lc
|
CNN/lenet-mnist/lenet_mnist.py
|
1
|
3540
|
'''
USAGE
python lenet_mnist.py --save-model 1 --weights output/lenet_weights.hdf5
python lenet_mnist.py --load-model 1 --weights output/lenet_weights.hdf5
'''
# import the necessary packages
import numpy as np
import argparse
import cv2
from pyimagesearch.cnn.networks import LeNet
from sklearn.cross_validation import train_test_split
from sklearn import datasets
from keras.optimizers import SGD
from keras.utils import np_utils
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--save-model", type=int, default=-1,
help="(optional) whether or not model should be saved to disk")
ap.add_argument("-l", "--load-model", type=int, default=-1,
help="(optional) whether or not pre-trained model should be loaded")
ap.add_argument("-w", "--weights", type=str,
help="(optional) path to weights file")
args = vars(ap.parse_args())
# grab the MNIST dataset (if this is your first time running this
# script, the download may take a minute -- the 55MB MNIST dataset
# will be downloaded)
print("[INFO] downloading MNIST...")
dataset = datasets.fetch_mldata("MNIST Original")
# reshape the MNIST dataset from a flat list of 784-dim vectors, to
# 28 x 28 pixel images, then scale the data to the range [0, 1.0]
# and construct the training and testing splits
data = dataset.data.reshape((dataset.data.shape[0], 28, 28))
data = data[:, np.newaxis, :, :]
(trainData, testData, trainLabels, testLabels) = train_test_split(
data / 255.0, dataset.target.astype("int"), test_size=0.33)
# transform the training and testing labels from integer class IDs into
# one-hot vectors of length `classes` -- the index of the true label is
# set to `1` and all other entries to `0`; in the case of MNIST, there
# are 10 class labels
trainLabels = np_utils.to_categorical(trainLabels, 10)
testLabels = np_utils.to_categorical(testLabels, 10)
# initialize the optimizer and model
print("[INFO] compiling model...")
opt = SGD(lr=0.01)
model = LeNet.build(width=28, height=28, depth=1, classes=10,
weightsPath=args["weights"] if args["load_model"] > 0 else None)
model.compile(loss="categorical_crossentropy", optimizer=opt,
metrics=["accuracy"])
# only train and evaluate the model if we *are not* loading a
# pre-existing model
if args["load_model"] < 0:
print("[INFO] training...")
model.fit(trainData, trainLabels, batch_size=128, nb_epoch=20,
verbose=1)
# show the accuracy on the testing set
print("[INFO] evaluating...")
(loss, accuracy) = model.evaluate(testData, testLabels,
batch_size=128, verbose=1)
print("[INFO] accuracy: {:.2f}%".format(accuracy * 100))
# check to see if the model should be saved to file
if args["save_model"] > 0:
print("[INFO] dumping weights to file...")
model.save_weights(args["weights"], overwrite=True)
# randomly select a few testing digits
for i in np.random.choice(np.arange(0, len(testLabels)), size=(10,)):
# classify the digit
probs = model.predict(testData[np.newaxis, i])
prediction = probs.argmax(axis=1)
    # resize the image from 28 x 28 to 96 x 96 so we can see it better
image = (testData[i][0] * 255).astype("uint8")
image = cv2.merge([image] * 3)
image = cv2.resize(image, (96, 96), interpolation=cv2.INTER_LINEAR)
cv2.putText(image, str(prediction[0]), (5, 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)
# show the image and prediction
print("[INFO] Predicted: {}, Actual: {}".format(prediction[0],
np.argmax(testLabels[i])))
cv2.imshow("Digit", image)
cv2.waitKey(3000)
|
mit
|
chris-chris/tensorflow
|
tensorflow/contrib/learn/python/learn/tests/dataframe/arithmetic_transform_test.py
|
62
|
2343
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for arithmetic transforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class SumTestCase(test.TestCase):
"""Test class for `Sum` transform."""
def testSum(self):
if not HAS_PANDAS:
return
num_rows = 100
pandas_df = pd.DataFrame({
"a": np.arange(num_rows),
"b": np.arange(num_rows, 2 * num_rows)
})
frame = df.TensorFlowDataFrame.from_pandas(
pandas_df, shuffle=False, batch_size=num_rows)
frame["a+b"] = frame["a"] + frame["b"]
expected_sum = pandas_df["a"] + pandas_df["b"]
actual_sum = frame.run_one_batch()["a+b"]
np.testing.assert_array_equal(expected_sum, actual_sum)
class DifferenceTestCase(test.TestCase):
"""Test class for `Difference` transform."""
def testDifference(self):
if not HAS_PANDAS:
return
num_rows = 100
pandas_df = pd.DataFrame({
"a": np.arange(num_rows),
"b": np.arange(num_rows, 2 * num_rows)
})
frame = df.TensorFlowDataFrame.from_pandas(
pandas_df, shuffle=False, batch_size=num_rows)
frame["a-b"] = frame["a"] - frame["b"]
expected_diff = pandas_df["a"] - pandas_df["b"]
actual_diff = frame.run_one_batch()["a-b"]
np.testing.assert_array_equal(expected_diff, actual_diff)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
jreback/pandas
|
pandas/tests/reductions/test_reductions.py
|
2
|
47335
|
from datetime import datetime, timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
NaT,
Period,
PeriodIndex,
RangeIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
isna,
timedelta_range,
to_timedelta,
)
import pandas._testing as tm
from pandas.core import nanops
def get_objs():
indexes = [
tm.makeBoolIndex(10, name="a"),
tm.makeIntIndex(10, name="a"),
tm.makeFloatIndex(10, name="a"),
tm.makeDateIndex(10, name="a"),
tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern"),
tm.makePeriodIndex(10, name="a"),
tm.makeStringIndex(10, name="a"),
tm.makeUnicodeIndex(10, name="a"),
]
arr = np.random.randn(10)
series = [Series(arr, index=idx, name="a") for idx in indexes]
objs = indexes + series
return objs
objs = get_objs()
class TestReductions:
@pytest.mark.parametrize("opname", ["max", "min"])
@pytest.mark.parametrize("obj", objs)
def test_ops(self, opname, obj):
result = getattr(obj, opname)()
if not isinstance(obj, PeriodIndex):
expected = getattr(obj.values, opname)()
else:
expected = Period(ordinal=getattr(obj.asi8, opname)(), freq=obj.freq)
if getattr(obj, "tz", None) is not None:
# We need to de-localize before comparing to the numpy-produced result
expected = expected.astype("M8[ns]").astype("int64")
assert result.value == expected
else:
assert result == expected
@pytest.mark.parametrize("opname", ["max", "min"])
@pytest.mark.parametrize(
"dtype, val",
[
("object", 2.0),
("float64", 2.0),
("datetime64[ns]", datetime(2011, 11, 1)),
("Int64", 2),
("boolean", True),
],
)
def test_nanminmax(self, opname, dtype, val, index_or_series):
# GH#7261
klass = index_or_series
if dtype in ["Int64", "boolean"] and klass == pd.Index:
pytest.skip("EAs can't yet be stored in an index")
def check_missing(res):
if dtype == "datetime64[ns]":
return res is pd.NaT
elif dtype == "Int64":
return res is pd.NA
else:
return pd.isna(res)
obj = klass([None], dtype=dtype)
assert check_missing(getattr(obj, opname)())
assert check_missing(getattr(obj, opname)(skipna=False))
obj = klass([], dtype=dtype)
assert check_missing(getattr(obj, opname)())
assert check_missing(getattr(obj, opname)(skipna=False))
if dtype == "object":
# generic test with object only works for empty / all NaN
return
obj = klass([None, val], dtype=dtype)
assert getattr(obj, opname)() == val
assert check_missing(getattr(obj, opname)(skipna=False))
obj = klass([None, val, None], dtype=dtype)
assert getattr(obj, opname)() == val
assert check_missing(getattr(obj, opname)(skipna=False))
@pytest.mark.parametrize("opname", ["max", "min"])
def test_nanargminmax(self, opname, index_or_series):
# GH#7261
klass = index_or_series
arg_op = "arg" + opname if klass is Index else "idx" + opname
obj = klass([pd.NaT, datetime(2011, 11, 1)])
assert getattr(obj, arg_op)() == 1
result = getattr(obj, arg_op)(skipna=False)
if klass is Series:
assert np.isnan(result)
else:
assert result == -1
obj = klass([pd.NaT, datetime(2011, 11, 1), pd.NaT])
# check DatetimeIndex non-monotonic path
assert getattr(obj, arg_op)() == 1
result = getattr(obj, arg_op)(skipna=False)
if klass is Series:
assert np.isnan(result)
else:
assert result == -1
@pytest.mark.parametrize("opname", ["max", "min"])
@pytest.mark.parametrize("dtype", ["M8[ns]", "datetime64[ns, UTC]"])
def test_nanops_empty_object(self, opname, index_or_series, dtype):
klass = index_or_series
arg_op = "arg" + opname if klass is Index else "idx" + opname
obj = klass([], dtype=dtype)
assert getattr(obj, opname)() is pd.NaT
assert getattr(obj, opname)(skipna=False) is pd.NaT
with pytest.raises(ValueError, match="empty sequence"):
getattr(obj, arg_op)()
with pytest.raises(ValueError, match="empty sequence"):
getattr(obj, arg_op)(skipna=False)
def test_argminmax(self):
obj = Index(np.arange(5, dtype="int64"))
assert obj.argmin() == 0
assert obj.argmax() == 4
obj = Index([np.nan, 1, np.nan, 2])
assert obj.argmin() == 1
assert obj.argmax() == 3
assert obj.argmin(skipna=False) == -1
assert obj.argmax(skipna=False) == -1
obj = Index([np.nan])
assert obj.argmin() == -1
assert obj.argmax() == -1
assert obj.argmin(skipna=False) == -1
assert obj.argmax(skipna=False) == -1
obj = Index([pd.NaT, datetime(2011, 11, 1), datetime(2011, 11, 2), pd.NaT])
assert obj.argmin() == 1
assert obj.argmax() == 2
assert obj.argmin(skipna=False) == -1
assert obj.argmax(skipna=False) == -1
obj = Index([pd.NaT])
assert obj.argmin() == -1
assert obj.argmax() == -1
assert obj.argmin(skipna=False) == -1
assert obj.argmax(skipna=False) == -1
@pytest.mark.parametrize("op, expected_col", [["max", "a"], ["min", "b"]])
def test_same_tz_min_max_axis_1(self, op, expected_col):
# GH 10390
df = DataFrame(
pd.date_range("2016-01-01 00:00:00", periods=3, tz="UTC"), columns=["a"]
)
df["b"] = df.a.subtract(Timedelta(seconds=3600))
result = getattr(df, op)(axis=1)
expected = df[expected_col].rename(None)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", ["maximum", "minimum"])
def test_numpy_reduction_with_tz_aware_dtype(self, tz_aware_fixture, func):
# GH 15552
tz = tz_aware_fixture
arg = pd.to_datetime(["2019"]).tz_localize(tz)
expected = Series(arg)
result = getattr(np, func)(expected, expected)
tm.assert_series_equal(result, expected)
class TestIndexReductions:
# Note: the name TestIndexReductions indicates these tests
# were moved from a Index-specific test file, _not_ that these tests are
# intended long-term to be Index-specific
@pytest.mark.parametrize(
"start,stop,step",
[
(0, 400, 3),
(500, 0, -6),
(-(10 ** 6), 10 ** 6, 4),
(10 ** 6, -(10 ** 6), -4),
(0, 10, 20),
],
)
def test_max_min_range(self, start, stop, step):
# GH#17607
idx = RangeIndex(start, stop, step)
expected = idx._int64index.max()
result = idx.max()
assert result == expected
# skipna should be irrelevant since RangeIndex should never have NAs
result2 = idx.max(skipna=False)
assert result2 == expected
expected = idx._int64index.min()
result = idx.min()
assert result == expected
# skipna should be irrelevant since RangeIndex should never have NAs
result2 = idx.min(skipna=False)
assert result2 == expected
# empty
idx = RangeIndex(start, stop, -step)
assert isna(idx.max())
assert isna(idx.min())
def test_minmax_timedelta64(self):
# monotonic
idx1 = TimedeltaIndex(["1 days", "2 days", "3 days"])
assert idx1.is_monotonic
# non-monotonic
idx2 = TimedeltaIndex(["1 days", np.nan, "3 days", "NaT"])
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timedelta("1 days")
assert idx.max() == Timedelta("3 days")
assert idx.argmin() == 0
assert idx.argmax() == 2
@pytest.mark.parametrize("op", ["min", "max"])
def test_minmax_timedelta_empty_or_na(self, op):
# Return NaT
obj = TimedeltaIndex([])
assert getattr(obj, op)() is pd.NaT
obj = TimedeltaIndex([pd.NaT])
assert getattr(obj, op)() is pd.NaT
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
assert getattr(obj, op)() is pd.NaT
def test_numpy_minmax_timedelta64(self):
td = timedelta_range("16815 days", "16820 days", freq="D")
assert np.min(td) == Timedelta("16815 days")
assert np.max(td) == Timedelta("16820 days")
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.min(td, out=0)
with pytest.raises(ValueError, match=errmsg):
np.max(td, out=0)
assert np.argmin(td) == 0
assert np.argmax(td) == 5
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.argmin(td, out=0)
with pytest.raises(ValueError, match=errmsg):
np.argmax(td, out=0)
def test_timedelta_ops(self):
# GH#4984
# make sure ops return Timedelta
s = Series(
[Timestamp("20130101") + timedelta(seconds=i * i) for i in range(10)]
)
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
assert result == expected
result = td.to_frame().mean()
assert result[0] == expected
result = td.quantile(0.1)
expected = Timedelta(np.timedelta64(2600, "ms"))
assert result == expected
result = td.median()
expected = to_timedelta("00:00:09")
assert result == expected
result = td.to_frame().median()
assert result[0] == expected
# GH#6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta("00:01:21")
assert result == expected
result = td.to_frame().sum()
assert result[0] == expected
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
assert result == expected
result = td.to_frame().std()
assert result[0] == expected
# GH#10040
# make sure NaT is properly handled by median()
s = Series([Timestamp("2015-02-03"), Timestamp("2015-02-07")])
assert s.diff().median() == timedelta(days=4)
s = Series(
[Timestamp("2015-02-03"), Timestamp("2015-02-07"), Timestamp("2015-02-15")]
)
assert s.diff().median() == timedelta(days=6)
@pytest.mark.parametrize("opname", ["skew", "kurt", "sem", "prod", "var"])
def test_invalid_td64_reductions(self, opname):
s = Series(
[Timestamp("20130101") + timedelta(seconds=i * i) for i in range(10)]
)
td = s.diff()
msg = "|".join(
[
f"reduction operation '{opname}' not allowed for this dtype",
rf"cannot perform {opname} with type timedelta64\[ns\]",
f"'TimedeltaArray' does not implement reduction '{opname}'",
]
)
with pytest.raises(TypeError, match=msg):
getattr(td, opname)()
with pytest.raises(TypeError, match=msg):
getattr(td.to_frame(), opname)(numeric_only=False)
def test_minmax_tz(self, tz_naive_fixture):
tz = tz_naive_fixture
# monotonic
idx1 = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = DatetimeIndex(
["2011-01-01", pd.NaT, "2011-01-03", "2011-01-02", pd.NaT], tz=tz
)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timestamp("2011-01-01", tz=tz)
assert idx.max() == Timestamp("2011-01-03", tz=tz)
assert idx.argmin() == 0
assert idx.argmax() == 2
@pytest.mark.parametrize("op", ["min", "max"])
def test_minmax_nat_datetime64(self, op):
# Return NaT
obj = DatetimeIndex([])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax_integer(self):
# GH#26125
idx = Index([1, 2, 3])
expected = idx.values.max()
result = np.max(idx)
assert result == expected
expected = idx.values.min()
result = np.min(idx)
assert result == expected
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.min(idx, out=0)
with pytest.raises(ValueError, match=errmsg):
np.max(idx, out=0)
expected = idx.values.argmax()
result = np.argmax(idx)
assert result == expected
expected = idx.values.argmin()
result = np.argmin(idx)
assert result == expected
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.argmin(idx, out=0)
with pytest.raises(ValueError, match=errmsg):
np.argmax(idx, out=0)
def test_numpy_minmax_range(self):
# GH#26125
idx = RangeIndex(0, 10, 3)
expected = idx._int64index.max()
result = np.max(idx)
assert result == expected
expected = idx._int64index.min()
result = np.min(idx)
assert result == expected
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.min(idx, out=0)
with pytest.raises(ValueError, match=errmsg):
np.max(idx, out=0)
# No need to test again argmax/argmin compat since the implementation
# is the same as basic integer index
def test_numpy_minmax_datetime64(self):
dr = pd.date_range(start="2016-01-15", end="2016-01-20")
assert np.min(dr) == Timestamp("2016-01-15 00:00:00", freq="D")
assert np.max(dr) == Timestamp("2016-01-20 00:00:00", freq="D")
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.min(dr, out=0)
with pytest.raises(ValueError, match=errmsg):
np.max(dr, out=0)
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.argmin(dr, out=0)
with pytest.raises(ValueError, match=errmsg):
np.argmax(dr, out=0)
def test_minmax_period(self):
# monotonic
idx1 = PeriodIndex([NaT, "2011-01-01", "2011-01-02", "2011-01-03"], freq="D")
assert not idx1.is_monotonic
assert idx1[1:].is_monotonic
# non-monotonic
idx2 = PeriodIndex(
["2011-01-01", NaT, "2011-01-03", "2011-01-02", NaT], freq="D"
)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Period("2011-01-01", freq="D")
assert idx.max() == Period("2011-01-03", freq="D")
assert idx1.argmin() == 1
assert idx2.argmin() == 0
assert idx1.argmax() == 3
assert idx2.argmax() == 2
for op in ["min", "max"]:
# Return NaT
obj = PeriodIndex([], freq="M")
result = getattr(obj, op)()
assert result is NaT
obj = PeriodIndex([NaT], freq="M")
result = getattr(obj, op)()
assert result is NaT
obj = PeriodIndex([NaT, NaT, NaT], freq="M")
result = getattr(obj, op)()
assert result is NaT
def test_numpy_minmax_period(self):
pr = pd.period_range(start="2016-01-15", end="2016-01-20")
assert np.min(pr) == Period("2016-01-15", freq="D")
assert np.max(pr) == Period("2016-01-20", freq="D")
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.min(pr, out=0)
with pytest.raises(ValueError, match=errmsg):
np.max(pr, out=0)
assert np.argmin(pr) == 0
assert np.argmax(pr) == 5
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
np.argmin(pr, out=0)
with pytest.raises(ValueError, match=errmsg):
np.argmax(pr, out=0)
def test_min_max_categorical(self):
ci = pd.CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False)
msg = (
r"Categorical is not ordered for operation min\n"
r"you can use .as_ordered\(\) to change the Categorical to an ordered one\n"
)
with pytest.raises(TypeError, match=msg):
ci.min()
msg = (
r"Categorical is not ordered for operation max\n"
r"you can use .as_ordered\(\) to change the Categorical to an ordered one\n"
)
with pytest.raises(TypeError, match=msg):
ci.max()
ci = pd.CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=True)
assert ci.min() == "c"
assert ci.max() == "b"
class TestSeriesReductions:
# Note: the name TestSeriesReductions indicates these tests
# were moved from a series-specific test file, _not_ that these tests are
# intended long-term to be series-specific
def test_sum_inf(self):
s = Series(np.random.randn(10))
s2 = s.copy()
s[5:8] = np.inf
s2[5:8] = np.nan
assert np.isinf(s.sum())
arr = np.random.randn(100, 100).astype("f4")
arr[:, 2] = np.inf
with pd.option_context("mode.use_inf_as_na", True):
tm.assert_almost_equal(s.sum(), s2.sum())
res = nanops.nansum(arr, axis=1)
assert np.isinf(res).all()
@pytest.mark.parametrize("dtype", ["float64", "Int64", "boolean", "object"])
@pytest.mark.parametrize("use_bottleneck", [True, False])
@pytest.mark.parametrize("method, unit", [("sum", 0.0), ("prod", 1.0)])
def test_empty(self, method, unit, use_bottleneck, dtype):
with pd.option_context("use_bottleneck", use_bottleneck):
# GH#9422 / GH#18921
# Entirely empty
s = Series([], dtype=dtype)
# NA by default
result = getattr(s, method)()
assert result == unit
# Explicit
result = getattr(s, method)(min_count=0)
assert result == unit
result = getattr(s, method)(min_count=1)
assert pd.isna(result)
# Skipna, default
result = getattr(s, method)(skipna=True)
            assert result == unit
# Skipna, explicit
result = getattr(s, method)(skipna=True, min_count=0)
assert result == unit
result = getattr(s, method)(skipna=True, min_count=1)
assert pd.isna(result)
result = getattr(s, method)(skipna=False, min_count=0)
assert result == unit
result = getattr(s, method)(skipna=False, min_count=1)
assert pd.isna(result)
# All-NA
s = Series([np.nan], dtype=dtype)
# NA by default
result = getattr(s, method)()
assert result == unit
# Explicit
result = getattr(s, method)(min_count=0)
assert result == unit
result = getattr(s, method)(min_count=1)
assert pd.isna(result)
# Skipna, default
result = getattr(s, method)(skipna=True)
            assert result == unit
# skipna, explicit
result = getattr(s, method)(skipna=True, min_count=0)
assert result == unit
result = getattr(s, method)(skipna=True, min_count=1)
assert pd.isna(result)
# Mix of valid, empty
s = Series([np.nan, 1], dtype=dtype)
# Default
result = getattr(s, method)()
assert result == 1.0
# Explicit
result = getattr(s, method)(min_count=0)
assert result == 1.0
result = getattr(s, method)(min_count=1)
assert result == 1.0
# Skipna
result = getattr(s, method)(skipna=True)
assert result == 1.0
result = getattr(s, method)(skipna=True, min_count=0)
assert result == 1.0
# GH#844 (changed in GH#9422)
df = DataFrame(np.empty((10, 0)), dtype=dtype)
assert (getattr(df, method)(1) == unit).all()
s = Series([1], dtype=dtype)
result = getattr(s, method)(min_count=2)
assert pd.isna(result)
result = getattr(s, method)(skipna=False, min_count=2)
assert pd.isna(result)
s = Series([np.nan], dtype=dtype)
result = getattr(s, method)(min_count=2)
assert pd.isna(result)
s = Series([np.nan, 1], dtype=dtype)
result = getattr(s, method)(min_count=2)
assert pd.isna(result)
@pytest.mark.parametrize("method, unit", [("sum", 0.0), ("prod", 1.0)])
def test_empty_multi(self, method, unit):
s = Series(
[1, np.nan, np.nan, np.nan],
index=pd.MultiIndex.from_product([("a", "b"), (0, 1)]),
)
# 1 / 0 by default
result = getattr(s, method)(level=0)
expected = Series([1, unit], index=["a", "b"])
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(s, method)(level=0, min_count=0)
expected = Series([1, unit], index=["a", "b"])
tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(s, method)(level=0, min_count=1)
expected = Series([1, np.nan], index=["a", "b"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("method", ["mean"])
@pytest.mark.parametrize("dtype", ["Float64", "Int64", "boolean"])
def test_ops_consistency_on_empty_nullable(self, method, dtype):
# GH#34814
# consistency for nullable dtypes on empty or ALL-NA mean
# empty series
eser = Series([], dtype=dtype)
result = getattr(eser, method)()
assert result is pd.NA
# ALL-NA series
nser = Series([np.nan], dtype=dtype)
result = getattr(nser, method)()
assert result is pd.NA
@pytest.mark.parametrize("method", ["mean", "median", "std", "var"])
def test_ops_consistency_on_empty(self, method):
# GH#7869
# consistency on empty
# float
result = getattr(Series(dtype=float), method)()
assert pd.isna(result)
# timedelta64[ns]
tdser = Series([], dtype="m8[ns]")
if method == "var":
msg = "|".join(
[
"operation 'var' not allowed",
r"cannot perform var with type timedelta64\[ns\]",
"'TimedeltaArray' does not implement reduction 'var'",
]
)
with pytest.raises(TypeError, match=msg):
getattr(tdser, method)()
else:
result = getattr(tdser, method)()
assert result is pd.NaT
def test_nansum_buglet(self):
ser = Series([1.0, np.nan], index=[0, 1])
result = np.nansum(ser)
tm.assert_almost_equal(result, 1)
@pytest.mark.parametrize("use_bottleneck", [True, False])
def test_sum_overflow(self, use_bottleneck):
with pd.option_context("use_bottleneck", use_bottleneck):
# GH#6915
# overflowing on the smaller int dtypes
for dtype in ["int32", "int64"]:
v = np.arange(5000000, dtype=dtype)
s = Series(v)
result = s.sum(skipna=False)
assert int(result) == v.sum(dtype="int64")
result = s.min(skipna=False)
assert int(result) == 0
result = s.max(skipna=False)
assert int(result) == v[-1]
for dtype in ["float32", "float64"]:
v = np.arange(5000000, dtype=dtype)
s = Series(v)
result = s.sum(skipna=False)
assert result == v.sum(dtype=dtype)
result = s.min(skipna=False)
assert np.allclose(float(result), 0.0)
result = s.max(skipna=False)
assert np.allclose(float(result), v[-1])
def test_empty_timeseries_reductions_return_nat(self):
# covers GH#11245
        for dtype in ("m8[ns]", "M8[ns]", "M8[ns, UTC]"):
assert Series([], dtype=dtype).min() is pd.NaT
assert Series([], dtype=dtype).max() is pd.NaT
assert Series([], dtype=dtype).min(skipna=False) is pd.NaT
assert Series([], dtype=dtype).max(skipna=False) is pd.NaT
def test_numpy_argmin(self):
# See GH#16830
data = np.arange(1, 11)
s = Series(data, index=data)
result = np.argmin(s)
expected = np.argmin(data)
assert result == expected
result = s.argmin()
assert result == expected
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argmin(s, out=data)
def test_numpy_argmax(self):
# See GH#16830
data = np.arange(1, 11)
s = Series(data, index=data)
result = np.argmax(s)
expected = np.argmax(data)
assert result == expected
result = s.argmax()
assert result == expected
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argmax(s, out=data)
def test_idxmin(self):
# test idxmin
# _check_stat_op approach can not be used here because of isna check.
string_series = tm.makeStringSeries().rename("series")
# add some NaNs
string_series[5:15] = np.NaN
# skipna or no
assert string_series[string_series.idxmin()] == string_series.min()
assert pd.isna(string_series.idxmin(skipna=False))
# no NaNs
nona = string_series.dropna()
assert nona[nona.idxmin()] == nona.min()
assert nona.index.values.tolist().index(nona.idxmin()) == nona.values.argmin()
# all NaNs
allna = string_series * np.nan
assert pd.isna(allna.idxmin())
# datetime64[ns]
s = Series(pd.date_range("20130102", periods=6))
result = s.idxmin()
assert result == 0
s[0] = np.nan
result = s.idxmin()
assert result == 1
def test_idxmax(self):
# test idxmax
# _check_stat_op approach can not be used here because of isna check.
string_series = tm.makeStringSeries().rename("series")
# add some NaNs
string_series[5:15] = np.NaN
# skipna or no
assert string_series[string_series.idxmax()] == string_series.max()
assert pd.isna(string_series.idxmax(skipna=False))
# no NaNs
nona = string_series.dropna()
assert nona[nona.idxmax()] == nona.max()
assert nona.index.values.tolist().index(nona.idxmax()) == nona.values.argmax()
# all NaNs
allna = string_series * np.nan
assert pd.isna(allna.idxmax())
from pandas import date_range
s = Series(date_range("20130102", periods=6))
result = s.idxmax()
assert result == 5
s[5] = np.nan
result = s.idxmax()
assert result == 4
# Float64Index
# GH#5914
s = Series([1, 2, 3], [1.1, 2.1, 3.1])
result = s.idxmax()
assert result == 3.1
result = s.idxmin()
assert result == 1.1
s = Series(s.index, s.index)
result = s.idxmax()
assert result == 3.1
result = s.idxmin()
assert result == 1.1
def test_all_any(self):
ts = tm.makeTimeSeries()
bool_series = ts > 0
assert not bool_series.all()
assert bool_series.any()
# Alternative types, with implicit 'object' dtype.
s = Series(["abc", True])
assert "abc" == s.any() # 'abc' || True => 'abc'
def test_all_any_params(self):
# Check skipna, with implicit 'object' dtype.
s1 = Series([np.nan, True])
s2 = Series([np.nan, False])
assert s1.all(skipna=False) # nan && True => True
assert s1.all(skipna=True)
assert np.isnan(s2.any(skipna=False)) # nan || False => nan
assert not s2.any(skipna=True)
# Check level.
s = Series([False, False, True, True, False, True], index=[0, 0, 1, 1, 2, 2])
tm.assert_series_equal(s.all(level=0), Series([False, True, False]))
tm.assert_series_equal(s.any(level=0), Series([False, True, True]))
msg = "Option bool_only is not implemented with option level"
with pytest.raises(NotImplementedError, match=msg):
s.any(bool_only=True, level=0)
with pytest.raises(NotImplementedError, match=msg):
s.all(bool_only=True, level=0)
# bool_only is not implemented alone.
# TODO GH38810 change this error message to:
# "Series.any does not implement bool_only"
msg = "Series.any does not implement numeric_only"
with pytest.raises(NotImplementedError, match=msg):
s.any(bool_only=True)
msg = "Series.all does not implement numeric_only."
with pytest.raises(NotImplementedError, match=msg):
s.all(bool_only=True)
def test_all_any_boolean(self):
# Check skipna, with boolean type
s1 = Series([pd.NA, True], dtype="boolean")
s2 = Series([pd.NA, False], dtype="boolean")
assert s1.all(skipna=False) is pd.NA # NA && True => NA
assert s1.all(skipna=True)
assert s2.any(skipna=False) is pd.NA # NA || False => NA
assert not s2.any(skipna=True)
# GH-33253: all True / all False values buggy with skipna=False
s3 = Series([True, True], dtype="boolean")
s4 = Series([False, False], dtype="boolean")
assert s3.all(skipna=False)
assert not s4.any(skipna=False)
# Check level TODO(GH-33449) result should also be boolean
s = Series(
[False, False, True, True, False, True],
index=[0, 0, 1, 1, 2, 2],
dtype="boolean",
)
tm.assert_series_equal(s.all(level=0), Series([False, True, False]))
tm.assert_series_equal(s.any(level=0), Series([False, True, True]))
def test_any_axis1_bool_only(self):
# GH#32432
df = DataFrame({"A": [True, False], "B": [1, 2]})
result = df.any(axis=1, bool_only=True)
expected = Series([True, False])
tm.assert_series_equal(result, expected)
def test_any_all_datetimelike(self):
        # GH#38723: this may not be the desired long-term behavior (GH#34479)
# but in the interim should be internally consistent
dta = date_range("1995-01-02", periods=3)._data
ser = Series(dta)
df = DataFrame(ser)
assert dta.all()
assert dta.any()
assert ser.all()
assert ser.any()
assert df.any().all()
assert df.all().all()
dta = dta.tz_localize("UTC")
ser = Series(dta)
df = DataFrame(ser)
assert dta.all()
assert dta.any()
assert ser.all()
assert ser.any()
assert df.any().all()
assert df.all().all()
tda = dta - dta[0]
ser = Series(tda)
df = DataFrame(ser)
assert tda.any()
assert not tda.all()
assert ser.any()
assert not ser.all()
assert df.any().all()
assert not df.all().any()
def test_timedelta64_analytics(self):
# index min/max
dti = pd.date_range("2012-1-1", periods=3, freq="D")
td = Series(dti) - Timestamp("20120101")
result = td.idxmin()
assert result == 0
result = td.idxmax()
assert result == 2
# GH#2982
# with NaT
td[0] = np.nan
result = td.idxmin()
assert result == 1
result = td.idxmax()
assert result == 2
# abs
s1 = Series(pd.date_range("20120101", periods=3))
s2 = Series(pd.date_range("20120102", periods=3))
expected = Series(s2 - s1)
result = np.abs(s1 - s2)
tm.assert_series_equal(result, expected)
result = (s1 - s2).abs()
tm.assert_series_equal(result, expected)
# max/min
result = td.max()
expected = Timedelta("2 days")
assert result == expected
result = td.min()
expected = Timedelta("1 days")
assert result == expected
@pytest.mark.parametrize(
"test_input,error_type",
[
(Series([], dtype="float64"), ValueError),
# For strings, or any Series with dtype 'O'
(Series(["foo", "bar", "baz"]), TypeError),
(Series([(1,), (2,)]), TypeError),
# For mixed data types
(Series(["foo", "foo", "bar", "bar", None, np.nan, "baz"]), TypeError),
],
)
def test_assert_idxminmax_raises(self, test_input, error_type):
"""
Cases where ``Series.argmax`` and related should raise an exception
"""
msg = (
"reduction operation 'argmin' not allowed for this dtype|"
"attempt to get argmin of an empty sequence"
)
with pytest.raises(error_type, match=msg):
test_input.idxmin()
with pytest.raises(error_type, match=msg):
test_input.idxmin(skipna=False)
msg = (
"reduction operation 'argmax' not allowed for this dtype|"
"attempt to get argmax of an empty sequence"
)
with pytest.raises(error_type, match=msg):
test_input.idxmax()
with pytest.raises(error_type, match=msg):
test_input.idxmax(skipna=False)
def test_idxminmax_with_inf(self):
# For numeric data with NA and Inf (GH #13595)
s = Series([0, -np.inf, np.inf, np.nan])
assert s.idxmin() == 1
assert np.isnan(s.idxmin(skipna=False))
assert s.idxmax() == 2
assert np.isnan(s.idxmax(skipna=False))
# Using old-style behavior that treats floating point nan, -inf, and
# +inf as missing
with pd.option_context("mode.use_inf_as_na", True):
assert s.idxmin() == 0
assert np.isnan(s.idxmin(skipna=False))
assert s.idxmax() == 0
            assert np.isnan(s.idxmax(skipna=False))
class TestDatetime64SeriesReductions:
# Note: the name TestDatetime64SeriesReductions indicates these tests
# were moved from a series-specific test file, _not_ that these tests are
# intended long-term to be series-specific
@pytest.mark.parametrize(
"nat_ser",
[
Series([pd.NaT, pd.NaT]),
Series([pd.NaT, Timedelta("nat")]),
Series([Timedelta("nat"), Timedelta("nat")]),
],
)
def test_minmax_nat_series(self, nat_ser):
# GH#23282
assert nat_ser.min() is pd.NaT
assert nat_ser.max() is pd.NaT
assert nat_ser.min(skipna=False) is pd.NaT
assert nat_ser.max(skipna=False) is pd.NaT
@pytest.mark.parametrize(
"nat_df",
[
DataFrame([pd.NaT, pd.NaT]),
DataFrame([pd.NaT, Timedelta("nat")]),
DataFrame([Timedelta("nat"), Timedelta("nat")]),
],
)
def test_minmax_nat_dataframe(self, nat_df):
# GH#23282
assert nat_df.min()[0] is pd.NaT
assert nat_df.max()[0] is pd.NaT
assert nat_df.min(skipna=False)[0] is pd.NaT
assert nat_df.max(skipna=False)[0] is pd.NaT
def test_min_max(self):
rng = pd.date_range("1/1/2000", "12/31/2000")
rng2 = rng.take(np.random.permutation(len(rng)))
the_min = rng2.min()
the_max = rng2.max()
assert isinstance(the_min, Timestamp)
assert isinstance(the_max, Timestamp)
assert the_min == rng[0]
assert the_max == rng[-1]
assert rng.min() == rng[0]
assert rng.max() == rng[-1]
def test_min_max_series(self):
rng = pd.date_range("1/1/2000", periods=10, freq="4h")
lvls = ["A", "A", "A", "B", "B", "B", "C", "C", "C", "C"]
df = DataFrame({"TS": rng, "V": np.random.randn(len(rng)), "L": lvls})
result = df.TS.max()
exp = Timestamp(df.TS.iat[-1])
assert isinstance(result, Timestamp)
assert result == exp
result = df.TS.min()
exp = Timestamp(df.TS.iat[0])
assert isinstance(result, Timestamp)
assert result == exp
class TestCategoricalSeriesReductions:
# Note: the name TestCategoricalSeriesReductions indicates these tests
# were moved from a series-specific test file, _not_ that these tests are
# intended long-term to be series-specific
@pytest.mark.parametrize("function", ["min", "max"])
def test_min_max_unordered_raises(self, function):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
msg = f"Categorical is not ordered for operation {function}"
with pytest.raises(TypeError, match=msg):
getattr(cat, function)()
@pytest.mark.parametrize(
"values, categories",
[
(list("abc"), list("abc")),
(list("abc"), list("cba")),
(list("abc") + [np.nan], list("cba")),
([1, 2, 3], [3, 2, 1]),
([1, 2, 3, np.nan], [3, 2, 1]),
],
)
@pytest.mark.parametrize("function", ["min", "max"])
def test_min_max_ordered(self, values, categories, function):
# GH 25303
cat = Series(Categorical(values, categories=categories, ordered=True))
result = getattr(cat, function)(skipna=True)
expected = categories[0] if function == "min" else categories[2]
assert result == expected
@pytest.mark.parametrize("function", ["min", "max"])
@pytest.mark.parametrize("skipna", [True, False])
def test_min_max_ordered_with_nan_only(self, function, skipna):
# https://github.com/pandas-dev/pandas/issues/33450
cat = Series(Categorical([np.nan], categories=[1, 2], ordered=True))
result = getattr(cat, function)(skipna=skipna)
assert result is np.nan
@pytest.mark.parametrize("function", ["min", "max"])
@pytest.mark.parametrize("skipna", [True, False])
def test_min_max_skipna(self, function, skipna):
cat = Series(
Categorical(["a", "b", np.nan, "a"], categories=["b", "a"], ordered=True)
)
result = getattr(cat, function)(skipna=skipna)
if skipna is True:
expected = "b" if function == "min" else "a"
assert result == expected
else:
assert result is np.nan
class TestSeriesMode:
# Note: the name TestSeriesMode indicates these tests
# were moved from a series-specific test file, _not_ that these tests are
# intended long-term to be series-specific
@pytest.mark.parametrize(
"dropna, expected",
[(True, Series([], dtype=np.float64)), (False, Series([], dtype=np.float64))],
)
def test_mode_empty(self, dropna, expected):
s = Series([], dtype=np.float64)
result = s.mode(dropna)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dropna, data, expected",
[
(True, [1, 1, 1, 2], [1]),
(True, [1, 1, 1, 2, 3, 3, 3], [1, 3]),
(False, [1, 1, 1, 2], [1]),
(False, [1, 1, 1, 2, 3, 3, 3], [1, 3]),
],
)
@pytest.mark.parametrize(
"dt", list(np.typecodes["AllInteger"] + np.typecodes["Float"])
)
def test_mode_numerical(self, dropna, data, expected, dt):
s = Series(data, dtype=dt)
result = s.mode(dropna)
expected = Series(expected, dtype=dt)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dropna, expected", [(True, [1.0]), (False, [1, np.nan])])
def test_mode_numerical_nan(self, dropna, expected):
s = Series([1, 1, 2, np.nan, np.nan])
result = s.mode(dropna)
expected = Series(expected)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dropna, expected1, expected2, expected3",
[(True, ["b"], ["bar"], ["nan"]), (False, ["b"], [np.nan], ["nan"])],
)
def test_mode_str_obj(self, dropna, expected1, expected2, expected3):
# Test string and object types.
data = ["a"] * 2 + ["b"] * 3
s = Series(data, dtype="c")
result = s.mode(dropna)
expected1 = Series(expected1, dtype="c")
tm.assert_series_equal(result, expected1)
data = ["foo", "bar", "bar", np.nan, np.nan, np.nan]
s = Series(data, dtype=object)
result = s.mode(dropna)
expected2 = Series(expected2, dtype=object)
tm.assert_series_equal(result, expected2)
data = ["foo", "bar", "bar", np.nan, np.nan, np.nan]
s = Series(data, dtype=object).astype(str)
result = s.mode(dropna)
expected3 = Series(expected3, dtype=str)
tm.assert_series_equal(result, expected3)
@pytest.mark.parametrize(
"dropna, expected1, expected2",
[(True, ["foo"], ["foo"]), (False, ["foo"], [np.nan])],
)
def test_mode_mixeddtype(self, dropna, expected1, expected2):
s = Series([1, "foo", "foo"])
result = s.mode(dropna)
expected = Series(expected1)
tm.assert_series_equal(result, expected)
s = Series([1, "foo", "foo", np.nan, np.nan, np.nan])
result = s.mode(dropna)
expected = Series(expected2, dtype=object)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dropna, expected1, expected2",
[
(
True,
["1900-05-03", "2011-01-03", "2013-01-02"],
["2011-01-03", "2013-01-02"],
),
(False, [np.nan], [np.nan, "2011-01-03", "2013-01-02"]),
],
)
def test_mode_datetime(self, dropna, expected1, expected2):
s = Series(
["2011-01-03", "2013-01-02", "1900-05-03", "nan", "nan"], dtype="M8[ns]"
)
result = s.mode(dropna)
expected1 = Series(expected1, dtype="M8[ns]")
tm.assert_series_equal(result, expected1)
s = Series(
[
"2011-01-03",
"2013-01-02",
"1900-05-03",
"2011-01-03",
"2013-01-02",
"nan",
"nan",
],
dtype="M8[ns]",
)
result = s.mode(dropna)
expected2 = Series(expected2, dtype="M8[ns]")
tm.assert_series_equal(result, expected2)
@pytest.mark.parametrize(
"dropna, expected1, expected2",
[
(True, ["-1 days", "0 days", "1 days"], ["2 min", "1 day"]),
(False, [np.nan], [np.nan, "2 min", "1 day"]),
],
)
def test_mode_timedelta(self, dropna, expected1, expected2):
# gh-5986: Test timedelta types.
s = Series(
["1 days", "-1 days", "0 days", "nan", "nan"], dtype="timedelta64[ns]"
)
result = s.mode(dropna)
expected1 = Series(expected1, dtype="timedelta64[ns]")
tm.assert_series_equal(result, expected1)
s = Series(
[
"1 day",
"1 day",
"-1 day",
"-1 day 2 min",
"2 min",
"2 min",
"nan",
"nan",
],
dtype="timedelta64[ns]",
)
result = s.mode(dropna)
expected2 = Series(expected2, dtype="timedelta64[ns]")
tm.assert_series_equal(result, expected2)
@pytest.mark.parametrize(
"dropna, expected1, expected2, expected3",
[
(
True,
Categorical([1, 2], categories=[1, 2]),
Categorical(["a"], categories=[1, "a"]),
Categorical([3, 1], categories=[3, 2, 1], ordered=True),
),
(
False,
Categorical([np.nan], categories=[1, 2]),
Categorical([np.nan, "a"], categories=[1, "a"]),
Categorical([np.nan, 3, 1], categories=[3, 2, 1], ordered=True),
),
],
)
def test_mode_category(self, dropna, expected1, expected2, expected3):
s = Series(Categorical([1, 2, np.nan, np.nan]))
result = s.mode(dropna)
expected1 = Series(expected1, dtype="category")
tm.assert_series_equal(result, expected1)
s = Series(Categorical([1, "a", "a", np.nan, np.nan]))
result = s.mode(dropna)
expected2 = Series(expected2, dtype="category")
tm.assert_series_equal(result, expected2)
s = Series(
Categorical(
[1, 1, 2, 3, 3, np.nan, np.nan], categories=[3, 2, 1], ordered=True
)
)
result = s.mode(dropna)
expected3 = Series(expected3, dtype="category")
tm.assert_series_equal(result, expected3)
@pytest.mark.parametrize(
"dropna, expected1, expected2",
[(True, [2 ** 63], [1, 2 ** 63]), (False, [2 ** 63], [1, 2 ** 63])],
)
def test_mode_intoverflow(self, dropna, expected1, expected2):
# Test for uint64 overflow.
s = Series([1, 2 ** 63, 2 ** 63], dtype=np.uint64)
result = s.mode(dropna)
expected1 = Series(expected1, dtype=np.uint64)
tm.assert_series_equal(result, expected1)
s = Series([1, 2 ** 63], dtype=np.uint64)
result = s.mode(dropna)
expected2 = Series(expected2, dtype=np.uint64)
tm.assert_series_equal(result, expected2)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
expected = Series(["foo", np.nan])
s = Series([1, "foo", "foo", np.nan, np.nan])
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = s.mode(dropna=False)
result = result.sort_values().reset_index(drop=True)
tm.assert_series_equal(result, expected)
|
bsd-3-clause
|
kelseyoo14/Wander
|
venv_2_7/lib/python2.7/site-packages/pandas/tools/tests/test_pivot.py
|
9
|
37334
|
import datetime
import numpy as np
from numpy.testing import assert_equal
import pandas as pd
from pandas import DataFrame, Series, Index, MultiIndex, Grouper
from pandas.tools.merge import concat
from pandas.tools.pivot import pivot_table, crosstab
from pandas.compat import range, u, product
import pandas.util.testing as tm
class TestPivotTable(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_pivot_table(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, values='D', index=index, columns=columns)
table2 = self.data.pivot_table(values='D', index=index, columns=columns)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values='D', index=index)
if len(index) > 1:
self.assertEqual(table.index.names, tuple(index))
else:
self.assertEqual(table.index.name, index[0])
if len(columns) > 1:
self.assertEqual(table.columns.names, columns)
else:
self.assertEqual(table.columns.name, columns[0])
expected = self.data.groupby(index + [columns])['D'].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_table_nocols(self):
df = DataFrame({'rows': ['a', 'b', 'c'],
'cols': ['x', 'y', 'z'],
'values': [1,2,3]})
rs = df.pivot_table(columns='cols', aggfunc=np.sum)
xp = df.pivot_table(index='cols', aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})
xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},
'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},
'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},
'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},
'quantity': {0: 2000000, 1: 500000, 2: 1000000, 3: 1000000}})
pv_col = df.pivot_table('quantity', 'month', ['customer', 'product'], dropna=False)
pv_ind = df.pivot_table('quantity', ['customer', 'product'], 'month', dropna=False)
m = MultiIndex.from_tuples([(u('A'), u('a')),
(u('A'), u('b')),
(u('A'), u('c')),
(u('A'), u('d')),
(u('B'), u('a')),
(u('B'), u('b')),
(u('B'), u('c')),
(u('B'), u('d')),
(u('C'), u('a')),
(u('C'), u('b')),
(u('C'), u('c')),
(u('C'), u('d'))])
assert_equal(pv_col.columns.values, m.values)
assert_equal(pv_ind.index.values, m.values)
def test_pass_array(self):
result = self.data.pivot_table('D', index=self.data.A, columns=self.data.C)
expected = self.data.pivot_table('D', index='A', columns='C')
tm.assert_frame_equal(result, expected)
def test_pass_function(self):
result = self.data.pivot_table('D', index=lambda x: x // 5,
columns=self.data.C)
expected = self.data.pivot_table('D', index=self.data.index // 5,
columns='C')
tm.assert_frame_equal(result, expected)
def test_pivot_table_multiple(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, index=index, columns=columns)
expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_dtypes(self):
# can convert dtypes
f = DataFrame({'a' : ['cat', 'bat', 'cat', 'bat'], 'v' : [1,2,3,4], 'i' : ['a','b','a','b']})
self.assertEqual(f.dtypes['v'], 'int64')
z = pivot_table(f, values='v', index=['a'], columns=['i'], fill_value=0, aggfunc=np.sum)
result = z.get_dtype_counts()
expected = Series(dict(int64 = 2))
tm.assert_series_equal(result, expected)
# cannot convert dtypes
f = DataFrame({'a' : ['cat', 'bat', 'cat', 'bat'], 'v' : [1.5,2.5,3.5,4.5], 'i' : ['a','b','a','b']})
self.assertEqual(f.dtypes['v'], 'float64')
z = pivot_table(f, values='v', index=['a'], columns=['i'], fill_value=0, aggfunc=np.mean)
result = z.get_dtype_counts()
expected = Series(dict(float64 = 2))
tm.assert_series_equal(result, expected)
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
expected = pivot_table(self.data.drop(['F'], axis=1),
index='A', columns=['B', 'C'], fill_value=0)
tm.assert_frame_equal(result, expected)
def test_pivot_multi_functions(self):
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
        # repeat the check with margins enabled
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func, margins=True)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
def test_pivot_index_with_nan(self):
# GH 3588
nan = np.nan
df = DataFrame({'a':['R1', 'R2', nan, 'R4'],
'b':['C1', 'C2', 'C3' , 'C4'],
'c':[10, 15, 17, 20]})
result = df.pivot('a','b','c')
expected = DataFrame([[nan,nan,17,nan],[10,nan,nan,nan],
[nan,15,nan,nan],[nan,nan,nan,20]],
index = Index([nan,'R1','R2','R4'], name='a'),
columns = Index(['C1','C2','C3','C4'], name='b'))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), expected.T)
# GH9491
df = DataFrame({'a':pd.date_range('2014-02-01', periods=6, freq='D'),
'c':100 + np.arange(6)})
df['b'] = df['a'] - pd.Timestamp('2014-02-02')
df.loc[1, 'a'] = df.loc[3, 'a'] = nan
df.loc[1, 'b'] = df.loc[4, 'b'] = nan
pv = df.pivot('a', 'b', 'c')
self.assertEqual(pv.notnull().values.sum(), len(df))
for _, row in df.iterrows():
self.assertEqual(pv.loc[row['a'], row['b']], row['c'])
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), pv.T)
def test_pivot_with_tz(self):
# GH 5878
df = DataFrame({'dt1': [datetime.datetime(2013, 1, 1, 9, 0),
datetime.datetime(2013, 1, 2, 9, 0),
datetime.datetime(2013, 1, 1, 9, 0),
datetime.datetime(2013, 1, 2, 9, 0)],
'dt2': [datetime.datetime(2014, 1, 1, 9, 0),
datetime.datetime(2014, 1, 1, 9, 0),
datetime.datetime(2014, 1, 2, 9, 0),
datetime.datetime(2014, 1, 2, 9, 0)],
'data1': np.arange(4,dtype='int64'),
'data2': np.arange(4,dtype='int64')})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00', '2014/01/02 09:00'] * 2,
name='dt2', tz='Asia/Tokyo')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00', '2013/01/02 09:00'],
name='dt1', tz='US/Pacific'),
columns=exp_col)
pv = df.pivot(index='dt1', columns='dt2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00', '2013/01/02 09:00'],
name='dt1', tz='US/Pacific'),
columns=pd.DatetimeIndex(['2014/01/01 09:00', '2014/01/02 09:00'],
name='dt2', tz='Asia/Tokyo'))
pv = df.pivot(index='dt1', columns='dt2', values='data1')
tm.assert_frame_equal(pv, expected)
def test_margins(self):
def _check_output(result, values_col, index=['A', 'B'],
columns=['C'],
margins_col='All'):
col_margins = result.ix[:-1, margins_col]
expected_col_margins = self.data.groupby(index)[values_col].mean()
tm.assert_series_equal(col_margins, expected_col_margins,
check_names=False)
self.assertEqual(col_margins.name, margins_col)
result = result.sortlevel()
index_margins = result.ix[(margins_col, '')].iloc[:-1]
expected_ix_margins = self.data.groupby(columns)[values_col].mean()
tm.assert_series_equal(index_margins, expected_ix_margins,
check_names=False)
self.assertEqual(index_margins.name, (margins_col, ''))
grand_total_margins = result.loc[(margins_col, ''), margins_col]
expected_total_margins = self.data[values_col].mean()
self.assertEqual(grand_total_margins, expected_total_margins)
# column specified
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean)
_check_output(result, 'D')
# Set a different margins_name (not 'All')
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean,
margins_name='Totals')
_check_output(result, 'D', margins_col='Totals')
# no column specified
table = self.data.pivot_table(index=['A', 'B'], columns='C',
margins=True, aggfunc=np.mean)
for value_col in table.columns.levels[0]:
_check_output(table[value_col], value_col)
# no col
# to help with a buglet
self.data.columns = [k * 2 for k in self.data.columns]
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc=np.mean)
for value_col in table.columns:
totals = table.loc[('All', ''), value_col]
self.assertEqual(totals, self.data[value_col].mean())
# no rows
rtable = self.data.pivot_table(columns=['AA', 'BB'], margins=True,
aggfunc=np.mean)
tm.assertIsInstance(rtable, Series)
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc='mean')
for item in ['DD', 'EE', 'FF']:
totals = table.loc[('All', ''), item]
self.assertEqual(totals, self.data[item].mean())
        # GH 8349: pivot_table with margins and dictionary aggfunc
data = [
{'JOB': 'Worker', 'NAME': 'Bob', 'YEAR': 2013,
'MONTH': 12, 'DAYS': 3, 'SALARY': 17},
{'JOB': 'Employ', 'NAME':
'Mary', 'YEAR': 2013, 'MONTH': 12, 'DAYS': 5, 'SALARY': 23},
{'JOB': 'Worker', 'NAME': 'Bob', 'YEAR': 2014,
'MONTH': 1, 'DAYS': 10, 'SALARY': 100},
{'JOB': 'Worker', 'NAME': 'Bob', 'YEAR': 2014,
'MONTH': 1, 'DAYS': 11, 'SALARY': 110},
{'JOB': 'Employ', 'NAME': 'Mary', 'YEAR': 2014,
'MONTH': 1, 'DAYS': 15, 'SALARY': 200},
{'JOB': 'Worker', 'NAME': 'Bob', 'YEAR': 2014,
'MONTH': 2, 'DAYS': 8, 'SALARY': 80},
{'JOB': 'Employ', 'NAME': 'Mary', 'YEAR': 2014,
'MONTH': 2, 'DAYS': 5, 'SALARY': 190},
]
df = DataFrame(data)
df = df.set_index(['JOB', 'NAME', 'YEAR', 'MONTH'], drop=False,
append=False)
result = df.pivot_table(index=['JOB', 'NAME'],
columns=['YEAR', 'MONTH'],
values=['DAYS', 'SALARY'],
aggfunc={'DAYS': 'mean', 'SALARY': 'sum'},
margins=True)
expected = df.pivot_table(index=['JOB', 'NAME'],
columns=['YEAR', 'MONTH'], values=['DAYS'],
aggfunc='mean', margins=True)
tm.assert_frame_equal(result['DAYS'], expected['DAYS'])
expected = df.pivot_table(index=['JOB', 'NAME'],
columns=['YEAR', 'MONTH'], values=['SALARY'],
aggfunc='sum', margins=True)
tm.assert_frame_equal(result['SALARY'], expected['SALARY'])
def test_pivot_integer_columns(self):
# caused by upstream bug in unstack
d = datetime.date.min
data = list(product(['foo', 'bar'], ['A', 'B', 'C'], ['x1', 'x2'],
[d + datetime.timedelta(i) for i in range(20)], [1.0]))
df = DataFrame(data)
table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2])
df2 = df.rename(columns=str)
table2 = df2.pivot_table(values='4', index=['0', '1', '3'], columns=['2'])
tm.assert_frame_equal(table, table2, check_names=False)
def test_pivot_no_level_overlap(self):
# GH #1181
data = DataFrame({'a': ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'] * 2,
'b': [0, 0, 0, 0, 1, 1, 1, 1] * 2,
'c': (['foo'] * 4 + ['bar'] * 4) * 2,
'value': np.random.randn(16)})
table = data.pivot_table('value', index='a', columns=['b', 'c'])
grouped = data.groupby(['a', 'b', 'c'])['value'].mean()
expected = grouped.unstack('b').unstack('c').dropna(axis=1, how='all')
tm.assert_frame_equal(table, expected)
def test_pivot_columns_lexsorted(self):
n = 10000
dtype = np.dtype([
("Index", object),
("Symbol", object),
("Year", int),
("Month", int),
("Day", int),
("Quantity", int),
("Price", float),
])
products = np.array([
('SP500', 'ADBE'),
('SP500', 'NVDA'),
('SP500', 'ORCL'),
('NDQ100', 'AAPL'),
('NDQ100', 'MSFT'),
('NDQ100', 'GOOG'),
('FTSE', 'DGE.L'),
('FTSE', 'TSCO.L'),
('FTSE', 'GSK.L'),
], dtype=[('Index', object), ('Symbol', object)])
items = np.empty(n, dtype=dtype)
iproduct = np.random.randint(0, len(products), n)
items['Index'] = products['Index'][iproduct]
items['Symbol'] = products['Symbol'][iproduct]
dr = pd.date_range(datetime.date(2000, 1, 1), datetime.date(2010, 12, 31))
dates = dr[np.random.randint(0, len(dr), n)]
items['Year'] = dates.year
items['Month'] = dates.month
items['Day'] = dates.day
items['Price'] = np.random.lognormal(4.0, 2.0, n)
df = DataFrame(items)
pivoted = df.pivot_table('Price', index=['Month', 'Day'],
columns=['Index', 'Symbol', 'Year'],
aggfunc='mean')
self.assertTrue(pivoted.columns.is_monotonic)
def test_pivot_complex_aggfunc(self):
f = {'D': ['std'], 'E': ['sum']}
expected = self.data.groupby(['A', 'B']).agg(f).unstack('B')
result = self.data.pivot_table(index='A', columns='B', aggfunc=f)
tm.assert_frame_equal(result, expected)
def test_margins_no_values_no_cols(self):
# Regression test on pivot table: no values or cols passed.
result = self.data[['A', 'B']].pivot_table(index=['A', 'B'], aggfunc=len, margins=True)
result_list = result.tolist()
self.assertEqual(sum(result_list[:-1]), result_list[-1])
def test_margins_no_values_two_rows(self):
# Regression test on pivot table: no values passed but rows are a multi-index
result = self.data[['A', 'B', 'C']].pivot_table(index=['A', 'B'], columns='C', aggfunc=len, margins=True)
self.assertEqual(result.All.tolist(), [3.0, 1.0, 4.0, 3.0, 11.0])
def test_margins_no_values_one_row_one_col(self):
# Regression test on pivot table: no values passed but row and col defined
result = self.data[['A', 'B']].pivot_table(index='A', columns='B', aggfunc=len, margins=True)
self.assertEqual(result.All.tolist(), [4.0, 7.0, 11.0])
def test_margins_no_values_two_row_two_cols(self):
# Regression test on pivot table: no values passed but rows and cols are multi-indexed
self.data['D'] = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k']
result = self.data[['A', 'B', 'C', 'D']].pivot_table(index=['A', 'B'], columns=['C', 'D'], aggfunc=len, margins=True)
self.assertEqual(result.All.tolist(), [3.0, 1.0, 4.0, 3.0, 11.0])
def test_pivot_table_with_margins_set_margin_name(self):
# GH 3335
for margin_name in ['foo', 'one', 666, None, ['a', 'b']]:
with self.assertRaises(ValueError):
# multi-index index
pivot_table(self.data, values='D', index=['A', 'B'],
columns=['C'], margins=True,
margins_name=margin_name)
with self.assertRaises(ValueError):
# multi-index column
pivot_table(self.data, values='D', index=['C'],
columns=['A', 'B'], margins=True,
margins_name=margin_name)
with self.assertRaises(ValueError):
# non-multi-index index/column
pivot_table(self.data, values='D', index=['A'],
columns=['B'], margins=True,
margins_name=margin_name)
def test_pivot_timegrouper(self):
df = DataFrame({
'Branch' : 'A A A A A A A B'.split(),
'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date' : [datetime.datetime(2013, 1, 1), datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 10, 1), datetime.datetime(2013, 10, 2),
datetime.datetime(2013, 10, 1), datetime.datetime(2013, 10, 2),
datetime.datetime(2013, 12, 2), datetime.datetime(2013, 12, 2),]}).set_index('Date')
expected = DataFrame(np.array([10, 18, 3],dtype='int64').reshape(1, 3),
index=[datetime.datetime(2013, 12, 31)],
columns='Carl Joe Mark'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='A'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result,expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='A'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result,expected.T)
expected = DataFrame(np.array([1, np.nan, 3, 9, 18, np.nan]).reshape(2, 3),
index=[datetime.datetime(2013, 1, 1), datetime.datetime(2013, 7, 1)],
columns='Carl Joe Mark'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='6MS'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
# passing the name
df = df.reset_index()
result = pivot_table(df, index=Grouper(freq='6MS', key='Date'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
self.assertRaises(KeyError, lambda : pivot_table(df, index=Grouper(freq='6MS', key='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
self.assertRaises(KeyError, lambda : pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', key='foo'), values='Quantity', aggfunc=np.sum))
# passing the level
df = df.set_index('Date')
result = pivot_table(df, index=Grouper(freq='6MS', level='Date'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS', level='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
self.assertRaises(ValueError, lambda : pivot_table(df, index=Grouper(freq='6MS', level='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
self.assertRaises(ValueError, lambda : pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', level='foo'), values='Quantity', aggfunc=np.sum))
# double grouper
df = DataFrame({
'Branch' : 'A A A A A A A B'.split(),
'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1,3,5,1,8,1,9,3],
'Date' : [datetime.datetime(2013,11,1,13,0), datetime.datetime(2013,9,1,13,5),
datetime.datetime(2013,10,1,20,0), datetime.datetime(2013,10,2,10,0),
datetime.datetime(2013,11,1,20,0), datetime.datetime(2013,10,2,10,0),
datetime.datetime(2013,10,2,12,0), datetime.datetime(2013,12,5,14,0)],
'PayDay' : [datetime.datetime(2013,10,4,0,0), datetime.datetime(2013,10,15,13,5),
datetime.datetime(2013,9,5,20,0), datetime.datetime(2013,11,2,10,0),
datetime.datetime(2013,10,7,20,0), datetime.datetime(2013,9,5,10,0),
datetime.datetime(2013,12,30,12,0), datetime.datetime(2013,11,20,14,0),]})
result = pivot_table(df, index=Grouper(freq='M', key='Date'),
columns=Grouper(freq='M', key='PayDay'),
values='Quantity', aggfunc=np.sum)
expected = DataFrame(np.array([np.nan, 3, np.nan, np.nan, 6, np.nan, 1, 9,
np.nan, 9, np.nan, np.nan, np.nan, np.nan, 3, np.nan]).reshape(4, 4),
index=[datetime.datetime(2013, 9, 30), datetime.datetime(2013, 10, 31),
datetime.datetime(2013, 11, 30), datetime.datetime(2013, 12, 31)],
columns=[datetime.datetime(2013, 9, 30), datetime.datetime(2013, 10, 31),
datetime.datetime(2013, 11, 30), datetime.datetime(2013, 12, 31)])
expected.index.name = 'Date'
expected.columns.name = 'PayDay'
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=Grouper(freq='M', key='PayDay'),
columns=Grouper(freq='M', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
tuples = [(datetime.datetime(2013, 9, 30), datetime.datetime(2013, 10, 31)),
(datetime.datetime(2013, 10, 31), datetime.datetime(2013, 9, 30)),
(datetime.datetime(2013, 10, 31), datetime.datetime(2013, 11, 30)),
(datetime.datetime(2013, 10, 31), datetime.datetime(2013, 12, 31)),
(datetime.datetime(2013, 11, 30), datetime.datetime(2013, 10, 31)),
(datetime.datetime(2013, 12, 31), datetime.datetime(2013, 11, 30)),]
idx = MultiIndex.from_tuples(tuples, names=['Date', 'PayDay'])
expected = DataFrame(np.array([3, np.nan, 6, np.nan, 1, np.nan,
9, np.nan, 9, np.nan, np.nan, 3]).reshape(6, 2),
index=idx, columns=['A', 'B'])
expected.columns.name = 'Branch'
result = pivot_table(df, index=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')], columns=['Branch'],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=['Branch'], columns=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
def test_pivot_datetime_tz(self):
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00', '2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00', '2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00', '2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6,dtype='int64'), 'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_idx = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00'], tz='US/Pacific', name='dt1')
exp_col1 = Index(['value1', 'value1'])
exp_col2 = Index(['a', 'b'], name='label')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 3], [1, 4], [2, 5]],
index=exp_idx, columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=['label'], values=['value1'])
tm.assert_frame_equal(result, expected)
exp_col1 = Index(['sum', 'sum', 'sum', 'sum', 'mean', 'mean', 'mean', 'mean'])
exp_col2 = Index(['value1', 'value1', 'value2', 'value2'] * 2)
exp_col3 = pd.DatetimeIndex(['2013-01-01 15:00:00', '2013-02-01 15:00:00'] * 4,
tz='Asia/Tokyo', name='dt2')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2, exp_col3])
expected = DataFrame(np.array([[0, 3, 1, 2, 0, 3, 1, 2],
[1, 4, 2, 1, 1, 4, 2, 1],
[2, 5, 1, 2, 2, 5, 1, 2]], dtype='int64'),
index=exp_idx,
columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=['dt2'], values=['value1', 'value2'],
aggfunc=[np.sum, np.mean])
tm.assert_frame_equal(result, expected)
def test_pivot_dtaccessor(self):
# GH 8103
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00', '2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00', '2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00', '2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6,dtype='int64'), 'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d))
result = pivot_table(df, index='label', columns=df['dt1'].dt.hour,
values='value1')
exp_idx = Index(['a', 'b'], name='label')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9:[2, 5]},
index=exp_idx, columns=Index([7, 8, 9],name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.month, columns=df['dt1'].dt.hour,
values='value1')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9:[2, 5]},
index=Index([1, 2],name='dt2'), columns=Index([7, 8, 9],name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.year.values,
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
exp_col = MultiIndex.from_arrays([[7, 7, 8, 8, 9, 9], [1, 2] * 3],names=['dt1','dt2'])
expected = DataFrame(np.array([[0, 3, 1, 4, 2, 5]],dtype='int64'),
index=[2013], columns=exp_col)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=np.array(['X', 'X', 'X', 'X', 'Y', 'Y']),
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
expected = DataFrame(np.array([[0, 3, 1, np.nan, 2, np.nan],
[np.nan, np.nan, np.nan, 4, np.nan, 5]]),
index=['X', 'Y'], columns=exp_col)
tm.assert_frame_equal(result, expected)
class TestCrosstab(tm.TestCase):
def setUp(self):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
self.df = df.append(df, ignore_index=True)
def test_crosstab_single(self):
df = self.df
result = crosstab(df['A'], df['C'])
expected = df.groupby(['A', 'C']).size().unstack()
tm.assert_frame_equal(result, expected.fillna(0).astype(np.int64))
def test_crosstab_multiple(self):
df = self.df
result = crosstab(df['A'], [df['B'], df['C']])
expected = df.groupby(['A', 'B', 'C']).size()
expected = expected.unstack(
'B').unstack('C').fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
result = crosstab([df['B'], df['C']], df['A'])
expected = df.groupby(['B', 'C', 'A']).size()
expected = expected.unstack('A').fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_crosstab_ndarray(self):
a = np.random.randint(0, 5, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 10, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'))
expected = crosstab(df['a'], [df['b'], df['c']])
tm.assert_frame_equal(result, expected)
result = crosstab([b, c], a, colnames=['a'], rownames=('b', 'c'))
expected = crosstab([df['b'], df['c']], df['a'])
tm.assert_frame_equal(result, expected)
# assign arbitrary names
result = crosstab(self.df['A'].values, self.df['C'].values)
self.assertEqual(result.index.name, 'row_0')
self.assertEqual(result.columns.name, 'col_0')
def test_crosstab_margins(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True)
self.assertEqual(result.index.names, ('a',))
self.assertEqual(result.columns.names, ['b', 'c'])
all_cols = result['All', '']
exp_cols = df.groupby(['a']).size().astype('i8')
exp_cols = exp_cols.append(Series([len(df)], index=['All']))
exp_cols.name = ('All', '')
tm.assert_series_equal(all_cols, exp_cols)
all_rows = result.ix['All']
exp_rows = df.groupby(['b', 'c']).size().astype('i8')
exp_rows = exp_rows.append(Series([len(df)], index=[('All', '')]))
exp_rows.name = 'All'
exp_rows = exp_rows.reindex(all_rows.index)
exp_rows = exp_rows.fillna(0).astype(np.int64)
tm.assert_series_equal(all_rows, exp_rows)
def test_crosstab_pass_values(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
values = np.random.randn(100)
table = crosstab([a, b], c, values, aggfunc=np.sum,
rownames=['foo', 'bar'], colnames=['baz'])
df = DataFrame({'foo': a, 'bar': b, 'baz': c, 'values': values})
expected = df.pivot_table('values', index=['foo', 'bar'], columns='baz',
aggfunc=np.sum)
tm.assert_frame_equal(table, expected)
def test_crosstab_dropna(self):
# GH 3820
a = np.array(['foo', 'foo', 'foo', 'bar', 'bar', 'foo', 'foo'], dtype=object)
b = np.array(['one', 'one', 'two', 'one', 'two', 'two', 'two'], dtype=object)
c = np.array(['dull', 'dull', 'dull', 'dull', 'dull', 'shiny', 'shiny'], dtype=object)
res = crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'], dropna=False)
m = MultiIndex.from_tuples([('one', 'dull'), ('one', 'shiny'),
('two', 'dull'), ('two', 'shiny')])
assert_equal(res.columns.values, m.values)
def test_categorical_margins(self):
# GH 10989
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
'z': np.arange(8) % 2})
expected = pd.DataFrame([[1.0, 2.0, 1.5],[5, 6, 5.5],[3, 4, 3.5]])
expected.index = Index([0,1,'All'],name='y')
expected.columns = Index([0,1,'All'],name='z')
data = df.copy()
table = data.pivot_table('x', 'y', 'z', margins=True)
tm.assert_frame_equal(table, expected)
data = df.copy()
data.y = data.y.astype('category')
data.z = data.z.astype('category')
table = data.pivot_table('x', 'y', 'z', margins=True)
tm.assert_frame_equal(table, expected)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
artistic-2.0
|
Vishluck/sympy
|
sympy/utilities/runtests.py
|
34
|
81153
|
"""
This is our testing framework.
Goals:
* it should be compatible with py.test and operate very similarly
(or identically)
* doesn't require any external dependencies
* preferably all the functionality should be in this file only
* no magic, just import the test file and execute the test functions, that's it
* portable
"""
from __future__ import print_function, division
import os
import sys
import platform
import inspect
import traceback
import pdb
import re
import linecache
import time
from fnmatch import fnmatch
from timeit import default_timer as clock
import doctest as pdoctest # avoid clashing with our doctest() function
from doctest import DocTestFinder, DocTestRunner
import random
import subprocess
import signal
import stat
from inspect import isgeneratorfunction
from sympy.core.cache import clear_cache
from sympy.core.compatibility import exec_, PY3, string_types, range
from sympy.utilities.misc import find_executable
from sympy.external import import_module
from sympy.utilities.exceptions import SymPyDeprecationWarning
IS_WINDOWS = (os.name == 'nt')
class Skipped(Exception):
pass
import __future__
# add more flags ??
future_flags = __future__.division.compiler_flag
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning of
every non-blank line in ``s``, and return the result.
If the string ``s`` is Unicode, it is encoded using the stdout
encoding and the ``backslashreplace`` error handler.
"""
# After a 2to3 run the below code is bogus, so wrap it with a version check
if not PY3:
if isinstance(s, unicode):
s = s.encode(pdoctest._encoding, 'backslashreplace')
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
pdoctest._indent = _indent
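# Illustrative only (not part of the original module): _indent prepends
# ``indent`` spaces to every non-blank line, e.g.
#     _indent("a\n\nb")  ->  "    a\n\n    b"
# blank lines are left untouched by the (?m)^(?!$) pattern above.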
# override the reporter to support Windows and Python 3
def _report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
s = self._checker.output_difference(example, got, self.optionflags)
s = s.encode('raw_unicode_escape').decode('utf8', 'ignore')
out(self._failure_header(test, example) + s)
if PY3 and IS_WINDOWS:
DocTestRunner.report_failure = _report_failure
def convert_to_native_paths(lst):
"""
Converts a list of '/' separated paths into a list of
native (os.sep separated) paths and converts to lowercase
if the system is case insensitive.
"""
newlst = []
for i, rv in enumerate(lst):
rv = os.path.join(*rv.split("/"))
# on windows the slash after the colon is dropped
if sys.platform == "win32":
pos = rv.find(':')
if pos != -1:
if rv[pos + 1] != '\\':
rv = rv[:pos + 1] + '\\' + rv[pos + 1:]
newlst.append(sys_normcase(rv))
return newlst
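# Illustrative only: on Windows a call such as
#     convert_to_native_paths(["sympy/core/tests", "doc/src"])
# would return roughly
#     ["sympy\\core\\tests", "doc\\src"]
# (additionally lower-cased when the filesystem is case insensitive).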
def get_sympy_dir():
"""
Returns the root sympy directory and set the global value
indicating whether the system is case sensitive or not.
"""
global sys_case_insensitive
this_file = os.path.abspath(__file__)
sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
sympy_dir = os.path.normpath(sympy_dir)
sys_case_insensitive = (os.path.isdir(sympy_dir) and
os.path.isdir(sympy_dir.lower()) and
os.path.isdir(sympy_dir.upper()))
return sys_normcase(sympy_dir)
def sys_normcase(f):
if sys_case_insensitive: # global defined after call to get_sympy_dir()
return f.lower()
return f
def setup_pprint():
from sympy import pprint_use_unicode, init_printing
# force pprint to be in ascii mode in doctests
pprint_use_unicode(False)
# hook our nice, hash-stable strprinter
init_printing(pretty_print=False)
def run_in_subprocess_with_hash_randomization(function, function_args=(),
function_kwargs={}, command=sys.executable,
module='sympy.utilities.runtests', force=False):
"""
Run a function in a Python subprocess with hash randomization enabled.
If hash randomization is not supported by the version of Python given, it
returns False. Otherwise, it returns the exit value of the command. The
function is passed to sys.exit(), so the return value of the function will
be the return value.
The environment variable PYTHONHASHSEED is used to seed Python's hash
randomization. If it is set, this function will return False, because
starting a new subprocess is unnecessary in that case. If it is not set,
one is set at random, and the tests are run. Note that if this
environment variable is set when Python starts, hash randomization is
automatically enabled. To force a subprocess to be created even if
PYTHONHASHSEED is set, pass ``force=True``. This flag will not force a
subprocess in Python versions that do not support hash randomization (see
below), because those versions of Python do not support the ``-R`` flag.
``function`` should be a string name of a function that is importable from
the module ``module``, like "_test". The default for ``module`` is
"sympy.utilities.runtests". ``function_args`` and ``function_kwargs``
should be a repr-able tuple and dict, respectively. The default Python
command is sys.executable, which is the currently running Python command.
This function is necessary because the seed for hash randomization must be
set by the environment variable before Python starts. Hence, in order to
use a predetermined seed for tests, we must start Python in a separate
subprocess.
Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
3.1.5, and 3.2.3, and is enabled by default in all Python versions after
and including 3.3.0.
Examples
========
>>> from sympy.utilities.runtests import (
... run_in_subprocess_with_hash_randomization)
>>> # run the core tests in verbose mode
>>> run_in_subprocess_with_hash_randomization("_test",
... function_args=("core",),
... function_kwargs={'verbose': True}) # doctest: +SKIP
# Will return 0 if sys.executable supports hash randomization and tests
# pass, 1 if they fail, and False if it does not support hash
# randomization.
"""
# Note, we must return False everywhere, not None, as subprocess.call will
# sometimes return None.
# First check if the Python version supports hash randomization
# If it doesn't have this support, it won't recognize the -R flag
p = subprocess.Popen([command, "-RV"], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.communicate()
if p.returncode != 0:
return False
hash_seed = os.getenv("PYTHONHASHSEED")
if not hash_seed:
os.environ["PYTHONHASHSEED"] = str(random.randrange(2**32))
else:
if not force:
return False
# Now run the command
commandstring = ("import sys; from %s import %s;sys.exit(%s(*%s, **%s))" %
(module, function, function, repr(function_args),
repr(function_kwargs)))
try:
p = subprocess.Popen([command, "-R", "-c", commandstring])
p.communicate()
except KeyboardInterrupt:
p.wait()
finally:
# Put the environment variable back, so that it reads correctly for
# the current Python process.
if hash_seed is None:
del os.environ["PYTHONHASHSEED"]
else:
os.environ["PYTHONHASHSEED"] = hash_seed
return p.returncode
def run_all_tests(test_args=(), test_kwargs={}, doctest_args=(),
doctest_kwargs={}, examples_args=(), examples_kwargs={'quiet': True}):
"""
Run all tests.
Right now, this runs the regular tests (bin/test), the doctests
(bin/doctest), the examples (examples/all.py), and the sage tests (see
sympy/external/tests/test_sage.py).
This is what ``setup.py test`` uses.
You can pass arguments and keyword arguments to the test functions that
support them (for now, test, doctest, and the examples). See the
docstrings of those functions for a description of the available options.
For example, to run the solvers tests with colors turned off:
>>> from sympy.utilities.runtests import run_all_tests
>>> run_all_tests(test_args=("solvers",),
...    test_kwargs={"colors": False}) # doctest: +SKIP
"""
tests_successful = True
try:
# Regular tests
if not test(*test_args, **test_kwargs):
# some regular test fails, so set the tests_successful
# flag to false and continue running the doctests
tests_successful = False
# Doctests
print()
if not doctest(*doctest_args, **doctest_kwargs):
tests_successful = False
# Examples
print()
sys.path.append("examples")
from all import run_examples # examples/all.py
if not run_examples(*examples_args, **examples_kwargs):
tests_successful = False
# Sage tests
if not (sys.platform == "win32" or PY3):
# run Sage tests; Sage currently doesn't support Windows or Python 3
dev_null = open(os.devnull, 'w')
if subprocess.call("sage -v", shell=True, stdout=dev_null,
stderr=dev_null) == 0:
if subprocess.call("sage -python bin/test "
"sympy/external/tests/test_sage.py", shell=True) != 0:
tests_successful = False
if tests_successful:
return
else:
# Return nonzero exit code
sys.exit(1)
except KeyboardInterrupt:
print()
print("DO *NOT* COMMIT!")
sys.exit(1)
def test(*paths, **kwargs):
"""
Run tests in the specified test_*.py files.
Tests in a particular test_*.py file are run if any of the given strings
in ``paths`` matches a part of the test file's path. If ``paths=[]``,
tests in all test_*.py files are run.
Notes:
- If sort=False, tests are run in random order (not default).
- Paths can be entered in native system format or in unix,
forward-slash format.
- Files that are on the blacklist can be tested by providing
their path; they are only excluded if no paths are given.
**Explanation of test results**
====== ===============================================================
Output Meaning
====== ===============================================================
. passed
F failed
X XPassed (expected to fail but passed)
f XFAILed (expected to fail and indeed failed)
s skipped
w slow
T timeout (e.g., when ``--timeout`` is used)
K KeyboardInterrupt (when running the slow tests with ``--slow``,
you can interrupt one of them without killing the test runner)
====== ===============================================================
Colors have no additional meaning and are used just to facilitate
interpreting the output.
Examples
========
>>> import sympy
Run all tests:
>>> sympy.test() # doctest: +SKIP
Run one file:
>>> sympy.test("sympy/core/tests/test_basic.py") # doctest: +SKIP
>>> sympy.test("_basic") # doctest: +SKIP
Run all tests in sympy/functions/ and some particular file:
>>> sympy.test("sympy/core/tests/test_basic.py",
... "sympy/functions") # doctest: +SKIP
Run all tests in sympy/core and sympy/utilities:
>>> sympy.test("/core", "/util") # doctest: +SKIP
Run specific test from a file:
>>> sympy.test("sympy/core/tests/test_basic.py",
... kw="test_equality") # doctest: +SKIP
Run specific test from any file:
>>> sympy.test(kw="subs") # doctest: +SKIP
Run the tests with verbose mode on:
>>> sympy.test(verbose=True) # doctest: +SKIP
Don't sort the test output:
>>> sympy.test(sort=False) # doctest: +SKIP
Turn on post-mortem pdb:
>>> sympy.test(pdb=True) # doctest: +SKIP
Turn off colors:
>>> sympy.test(colors=False) # doctest: +SKIP
Force colors, even when the output is not to a terminal (this is useful,
e.g., if you are piping to ``less -r`` and you still want colors)
>>> sympy.test(force_colors=True) # doctest: +SKIP
The traceback verboseness can be set to "short" or "no" (default is
"short")
>>> sympy.test(tb='no') # doctest: +SKIP
The ``split`` option can be passed to split the test run into parts. The
split currently only splits the test files, though this may change in the
future. ``split`` should be a string of the form 'a/b', which will run
part ``a`` of ``b``. For instance, to run the first half of the test suite:
>>> sympy.test(split='1/2') # doctest: +SKIP
You can disable running the tests in a separate subprocess using
``subprocess=False``. This is done to support seeding hash randomization,
which is enabled by default in the Python versions where it is supported.
If subprocess=False, hash randomization is enabled/disabled according to
whether it has been enabled or not in the calling Python process.
However, even if it is enabled, the seed cannot be printed unless it is
called from a new Python process.
Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
3.1.5, and 3.2.3, and is enabled by default in all Python versions after
and including 3.3.0.
If hash randomization is not supported ``subprocess=False`` is used
automatically.
>>> sympy.test(subprocess=False) # doctest: +SKIP
To set the hash randomization seed, set the environment variable
``PYTHONHASHSEED`` before running the tests. This can be done from within
Python using
>>> import os
>>> os.environ['PYTHONHASHSEED'] = '42' # doctest: +SKIP
Or from the command line using
$ PYTHONHASHSEED=42 ./bin/test
If the seed is not set, a random seed will be chosen.
Note that to reproduce the same hash values, you must use both the same seed
as well as the same architecture (32-bit vs. 64-bit).
"""
subprocess = kwargs.pop("subprocess", True)
rerun = kwargs.pop("rerun", 0)
# count up from 0, do not print 0
print_counter = lambda i : (print("rerun %d" % (rerun-i))
if rerun-i else None)
if subprocess:
# loop backwards so last i is 0
for i in range(rerun, -1, -1):
print_counter(i)
ret = run_in_subprocess_with_hash_randomization("_test",
function_args=paths, function_kwargs=kwargs)
if ret is False:
break
val = not bool(ret)
# exit on the first failure or if done
if not val or i == 0:
return val
# rerun even if hash randomization is not supported
for i in range(rerun, -1, -1):
print_counter(i)
val = not bool(_test(*paths, **kwargs))
if not val or i == 0:
return val
def _test(*paths, **kwargs):
"""
Internal function that actually runs the tests.
All keyword arguments from ``test()`` are passed to this function except for
``subprocess``.
Returns 0 if tests passed and 1 if they failed. See the docstring of
``test()`` for more information.
"""
verbose = kwargs.get("verbose", False)
tb = kwargs.get("tb", "short")
kw = kwargs.get("kw", None) or ()
# ensure that kw is a tuple
if isinstance(kw, str):
kw = (kw, )
post_mortem = kwargs.get("pdb", False)
colors = kwargs.get("colors", True)
force_colors = kwargs.get("force_colors", False)
sort = kwargs.get("sort", True)
seed = kwargs.get("seed", None)
if seed is None:
seed = random.randrange(100000000)
timeout = kwargs.get("timeout", False)
slow = kwargs.get("slow", False)
enhance_asserts = kwargs.get("enhance_asserts", False)
split = kwargs.get('split', None)
blacklist = kwargs.get('blacklist', [])
blacklist = convert_to_native_paths(blacklist)
fast_threshold = kwargs.get('fast_threshold', None)
slow_threshold = kwargs.get('slow_threshold', None)
r = PyTestReporter(verbose=verbose, tb=tb, colors=colors,
force_colors=force_colors, split=split)
t = SymPyTests(r, kw, post_mortem, seed,
fast_threshold=fast_threshold,
slow_threshold=slow_threshold)
# Disable warnings for external modules
import sympy.external
sympy.external.importtools.WARN_OLD_VERSION = False
sympy.external.importtools.WARN_NOT_INSTALLED = False
# Show deprecation warnings
import warnings
warnings.simplefilter("error", SymPyDeprecationWarning)
test_files = t.get_test_files('sympy')
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
paths = convert_to_native_paths(paths)
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if slow:
# Seed to evenly shuffle slow tests among splits
random.seed(41992450)
random.shuffle(matched)
if split:
matched = split_list(matched, split)
t._testfiles.extend(matched)
return int(not t.test(sort=sort, timeout=timeout,
slow=slow, enhance_asserts=enhance_asserts))
def doctest(*paths, **kwargs):
"""
Runs doctests in all \*.py files in the sympy directory which match
any of the given strings in ``paths`` or all tests if paths=[].
Notes:
- Paths can be entered in native system format or in unix,
forward-slash format.
- Files that are on the blacklist can be tested by providing
their path; they are only excluded if no paths are given.
Examples
========
>>> import sympy
Run all tests:
>>> sympy.doctest() # doctest: +SKIP
Run one file:
>>> sympy.doctest("sympy/core/basic.py") # doctest: +SKIP
>>> sympy.doctest("polynomial.rst") # doctest: +SKIP
Run all tests in sympy/functions/ and some particular file:
>>> sympy.doctest("/functions", "basic.py") # doctest: +SKIP
Run any file having polynomial in its name, doc/src/modules/polynomial.rst,
sympy/functions/special/polynomials.py, and sympy/polys/polynomial.py:
>>> sympy.doctest("polynomial") # doctest: +SKIP
The ``split`` option can be passed to split the test run into parts. The
split currently only splits the test files, though this may change in the
future. ``split`` should be a string of the form 'a/b', which will run
part ``a`` of ``b``. Note that the regular doctests and the Sphinx
doctests are split independently. For instance, to run the first half of
the test suite:
>>> sympy.doctest(split='1/2') # doctest: +SKIP
The ``subprocess`` and ``verbose`` options are the same as with the function
``test()``. See the docstring of that function for more information.
"""
subprocess = kwargs.pop("subprocess", True)
rerun = kwargs.pop("rerun", 0)
# count up from 0, do not print 0
print_counter = lambda i : (print("rerun %d" % (rerun-i))
if rerun-i else None)
if subprocess:
# loop backwards so last i is 0
for i in range(rerun, -1, -1):
print_counter(i)
ret = run_in_subprocess_with_hash_randomization("_doctest",
function_args=paths, function_kwargs=kwargs)
if ret is False:
break
val = not bool(ret)
# exit on the first failure or if done
if not val or i == 0:
return val
# rerun even if hash randomization is not supported
for i in range(rerun, -1, -1):
print_counter(i)
val = not bool(_doctest(*paths, **kwargs))
if not val or i == 0:
return val
def _doctest(*paths, **kwargs):
"""
Internal function that actually runs the doctests.
All keyword arguments from ``doctest()`` are passed to this function
except for ``subprocess``.
Returns 0 if tests passed and 1 if they failed. See the docstrings of
``doctest()`` and ``test()`` for more information.
"""
normal = kwargs.get("normal", False)
verbose = kwargs.get("verbose", False)
colors = kwargs.get("colors", True)
force_colors = kwargs.get("force_colors", False)
blacklist = kwargs.get("blacklist", [])
split = kwargs.get('split', None)
blacklist.extend([
"doc/src/modules/plotting.rst", # generates live plots
"sympy/utilities/compilef.py", # needs tcc
"sympy/physics/gaussopt.py", # raises deprecation warning
])
if import_module('numpy') is None:
blacklist.extend([
"sympy/plotting/experimental_lambdify.py",
"sympy/plotting/plot_implicit.py",
"examples/advanced/autowrap_integrators.py",
"examples/advanced/autowrap_ufuncify.py",
"examples/intermediate/sample.py",
"examples/intermediate/mplot2d.py",
"examples/intermediate/mplot3d.py",
"doc/src/modules/numeric-computation.rst"
])
else:
if import_module('matplotlib') is None:
blacklist.extend([
"examples/intermediate/mplot2d.py",
"examples/intermediate/mplot3d.py"
])
else:
# don't display matplotlib windows
from sympy.plotting.plot import unset_show
unset_show()
if import_module('pyglet') is None:
blacklist.extend(["sympy/plotting/pygletplot"])
if import_module('theano') is None:
blacklist.extend(["doc/src/modules/numeric-computation.rst"])
# disabled because of doctest failures in asmeurer's bot
blacklist.extend([
"sympy/utilities/autowrap.py",
"examples/advanced/autowrap_integrators.py",
"examples/advanced/autowrap_ufuncify.py"
])
# blacklist these modules until issue 4840 is resolved
blacklist.extend([
"sympy/conftest.py",
"sympy/utilities/benchmarking.py"
])
blacklist = convert_to_native_paths(blacklist)
# Disable warnings for external modules
import sympy.external
sympy.external.importtools.WARN_OLD_VERSION = False
sympy.external.importtools.WARN_NOT_INSTALLED = False
# Show deprecation warnings
import warnings
warnings.simplefilter("error", SymPyDeprecationWarning)
r = PyTestReporter(verbose, split=split, colors=colors,\
force_colors=force_colors)
t = SymPyDocTests(r, normal)
test_files = t.get_test_files('sympy')
test_files.extend(t.get_test_files('examples', init_only=False))
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
# take only what was requested...but not blacklisted items
# and allow for partial match anywhere or fnmatch of name
paths = convert_to_native_paths(paths)
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
t._testfiles.extend(matched)
# run the tests and record the result for this *py portion of the tests
if t._testfiles:
failed = not t.test()
else:
failed = False
# N.B.
# --------------------------------------------------------------------
# Here we test *.rst files at or below doc/src. Code from these must
# be self-supporting in terms of imports since there is no importing
# of necessary modules by doctest.testfile. If you try to pass *.py
# files through this they might fail because they will lack the needed
# imports and smarter parsing that can be done with source code.
#
test_files = t.get_test_files('doc/src', '*.rst', init_only=False)
test_files.sort()
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
# Take only what was requested as long as it's not on the blacklist.
# Paths were already made native in *py tests so don't repeat here.
# There's no chance of having a *py file slip through since we
# only have *rst files in test_files.
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
setup_pprint()
first_report = True
for rst_file in matched:
if not os.path.isfile(rst_file):
continue
old_displayhook = sys.displayhook
try:
out = sympytestfile(
rst_file, module_relative=False, encoding='utf-8',
optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
finally:
# make sure we return to the original displayhook in case some
# doctest has changed that
sys.displayhook = old_displayhook
rstfailed, tested = out
if tested:
failed = rstfailed or failed
if first_report:
first_report = False
msg = 'rst doctests start'
if not t._testfiles:
r.start(msg=msg)
else:
r.write_center(msg)
print()
# use as the id, everything past the first 'sympy'
file_id = rst_file[rst_file.find('sympy') + len('sympy') + 1:]
print(file_id, end=" ")
# get at least the name out so it is known who is being tested
wid = r.terminal_width - len(file_id) - 1 # update width
test_file = '[%s]' % (tested)
report = '[%s]' % (rstfailed or 'OK')
print(''.join(
[test_file, ' '*(wid - len(test_file) - len(report)), report])
)
# the doctests for *py will have printed this message already if there was
# a failure, so now only print it if there was intervening reporting by
# testing the *rst as evidenced by first_report no longer being True.
if not first_report and failed:
print()
print("DO *NOT* COMMIT!")
return int(failed)
sp = re.compile(r'([0-9]+)/([1-9][0-9]*)')
def split_list(l, split):
"""
Splits a list into part a of b
split should be a string of the form 'a/b'. For instance, '1/3' would give
the split one of three.
If the length of the list is not divisible by the number of splits, the
last split will have more items.
>>> from sympy.utilities.runtests import split_list
>>> a = list(range(10))
>>> split_list(a, '1/3')
[0, 1, 2]
>>> split_list(a, '2/3')
[3, 4, 5]
>>> split_list(a, '3/3')
[6, 7, 8, 9]
"""
m = sp.match(split)
if not m:
raise ValueError("split must be a string of the form a/b where a and b are ints")
i, t = map(int, m.groups())
return l[(i - 1)*len(l)//t:i*len(l)//t]
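# Illustrative only: the slice above hands any remainder to the last chunk.
# For len(l) == 10 and split == '3/3' the boundaries are
#     (3 - 1) * 10 // 3 == 6    and    3 * 10 // 3 == 10
# so the third part receives the four trailing items, matching the doctest.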
from collections import namedtuple
SymPyTestResults = namedtuple('TestResults', 'failed attempted')
def sympytestfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False,
parser=pdoctest.DocTestParser(), encoding=None):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg ``module_relative`` specifies how filenames
should be interpreted:
- If ``module_relative`` is True (the default), then ``filename``
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
``package`` argument is specified, then it is relative to that
package. To ensure os-independence, ``filename`` should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If ``module_relative`` is False, then ``filename`` specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg ``name`` gives the name of the test; by default
use the file's basename.
Optional keyword argument ``package`` is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify ``package`` if ``module_relative`` is False.
Optional keyword arg ``globs`` gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg ``extraglobs`` gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg ``verbose`` prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg ``report`` prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg ``optionflags`` or's together module constants,
and defaults to 0. Possible values (see the docs for details):
- DONT_ACCEPT_TRUE_FOR_1
- DONT_ACCEPT_BLANKLINE
- NORMALIZE_WHITESPACE
- ELLIPSIS
- SKIP
- IGNORE_EXCEPTION_DETAIL
- REPORT_UDIFF
- REPORT_CDIFF
- REPORT_NDIFF
- REPORT_ONLY_FIRST_FAILURE
Optional keyword arg ``raise_on_error`` raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg ``parser`` specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Optional keyword arg ``encoding`` specifies an encoding that should
be used to convert the file to unicode.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
if not PY3:
text, filename = pdoctest._load_testfile(
filename, package, module_relative)
if encoding is not None:
text = text.decode(encoding)
else:
text, filename = pdoctest._load_testfile(
filename, package, module_relative, encoding)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__'
if raise_on_error:
runner = pdoctest.DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = SymPyDocTestRunner(verbose=verbose, optionflags=optionflags)
runner._checker = SymPyOutputChecker()
# Read the file, convert it to a test, and run it.
test = parser.get_doctest(text, globs, name, filename, 0)
runner.run(test, compileflags=future_flags)
if report:
runner.summarize()
if pdoctest.master is None:
pdoctest.master = runner
else:
pdoctest.master.merge(runner)
return SymPyTestResults(runner.failures, runner.tries)
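# Illustrative only (hypothetical path): a typical call mirrors what
# _doctest() does for *.rst files, e.g.
#     sympytestfile("doc/src/modules/example.rst", module_relative=False,
#                   encoding="utf-8", optionflags=pdoctest.ELLIPSIS)
# which returns a SymPyTestResults(failed, attempted) namedtuple.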
class SymPyTests(object):
def __init__(self, reporter, kw="", post_mortem=False,
seed=None, fast_threshold=None, slow_threshold=None):
self._post_mortem = post_mortem
self._kw = kw
self._count = 0
self._root_dir = sympy_dir
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._testfiles = []
self._seed = seed if seed is not None else random.random()
# Defaults in seconds, from human / UX design limits
# http://www.nngroup.com/articles/response-times-3-important-limits/
#
# These defaults are *NOT* set in stone as we are measuring different
# things, so others feel free to come up with a better yardstick :)
if fast_threshold:
self._fast_threshold = float(fast_threshold)
else:
self._fast_threshold = 0.1
if slow_threshold:
self._slow_threshold = float(slow_threshold)
else:
self._slow_threshold = 10
def test(self, sort=False, timeout=False, slow=False, enhance_asserts=False):
"""
Runs the tests returning True if all tests pass, otherwise False.
If sort=False run tests in random order.
"""
if sort:
self._testfiles.sort()
elif slow:
pass
else:
random.seed(self._seed)
random.shuffle(self._testfiles)
self._reporter.start(self._seed)
for f in self._testfiles:
try:
self.test_file(f, sort, timeout, slow, enhance_asserts)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def _enhance_asserts(self, source):
from ast import (NodeTransformer, Compare, Name, Store, Load, Tuple,
Assign, BinOp, Str, Mod, Assert, parse, fix_missing_locations)
ops = {"Eq": '==', "NotEq": '!=', "Lt": '<', "LtE": '<=',
"Gt": '>', "GtE": '>=', "Is": 'is', "IsNot": 'is not',
"In": 'in', "NotIn": 'not in'}
class Transform(NodeTransformer):
def visit_Assert(self, stmt):
if isinstance(stmt.test, Compare):
compare = stmt.test
values = [compare.left] + compare.comparators
names = [ "_%s" % i for i, _ in enumerate(values) ]
names_store = [ Name(n, Store()) for n in names ]
names_load = [ Name(n, Load()) for n in names ]
target = Tuple(names_store, Store())
value = Tuple(values, Load())
assign = Assign([target], value)
new_compare = Compare(names_load[0], compare.ops, names_load[1:])
msg_format = "\n%s " + "\n%s ".join([ ops[op.__class__.__name__] for op in compare.ops ]) + "\n%s"
msg = BinOp(Str(msg_format), Mod(), Tuple(names_load, Load()))
test = Assert(new_compare, msg, lineno=stmt.lineno, col_offset=stmt.col_offset)
return [assign, test]
else:
return stmt
tree = parse(source)
new_tree = Transform().visit(tree)
return fix_missing_locations(new_tree)
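# Illustrative only: with enhance_asserts enabled, a statement such as
#     assert x == y
# is rewritten roughly into
#     (_0, _1) = (x, y); assert _0 == _1, "\n%s ==\n%s" % (_0, _1)
# so a failing assertion also reports the two compared values.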
def test_file(self, filename, sort=True, timeout=False, slow=False, enhance_asserts=False):
reporter = self._reporter
funcs = []
try:
gl = {'__file__': filename}
try:
if PY3:
open_file = lambda: open(filename, encoding="utf8")
else:
open_file = lambda: open(filename)
with open_file() as f:
source = f.read()
if self._kw:
for l in source.splitlines():
if l.lstrip().startswith('def '):
if any(l.find(k) != -1 for k in self._kw):
break
else:
return
if enhance_asserts:
try:
source = self._enhance_asserts(source)
except ImportError:
pass
code = compile(source, filename, "exec")
exec_(code, gl)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
reporter.import_error(filename, sys.exc_info())
return
clear_cache()
self._count += 1
random.seed(self._seed)
pytestfile = ""
if "XFAIL" in gl:
pytestfile = inspect.getsourcefile(gl["XFAIL"])
pytestfile2 = ""
if "slow" in gl:
pytestfile2 = inspect.getsourcefile(gl["slow"])
disabled = gl.get("disabled", False)
if not disabled:
# we need to filter only those functions that begin with 'test_'
# that are defined in the testing file or in the file where
# the XFAIL decorator is defined
funcs = [gl[f] for f in gl.keys() if f.startswith("test_") and
(inspect.isfunction(gl[f]) or inspect.ismethod(gl[f])) and
(inspect.getsourcefile(gl[f]) == filename or
inspect.getsourcefile(gl[f]) == pytestfile or
inspect.getsourcefile(gl[f]) == pytestfile2)]
if slow:
funcs = [f for f in funcs if getattr(f, '_slow', False)]
# Sorting of XFAILed functions isn't fixed yet :-(
funcs.sort(key=lambda x: inspect.getsourcelines(x)[1])
i = 0
while i < len(funcs):
if isgeneratorfunction(funcs[i]):
# some tests can be generators that return the actual
# test functions; we unpack them below:
f = funcs.pop(i)
for fg in f():
func = fg[0]
args = fg[1:]
fgw = lambda func=func, args=args: func(*args)  # bind now; a bare closure would only see the last func/args
funcs.insert(i, fgw)
i += 1
else:
i += 1
# drop functions that are not selected with the keyword expression:
funcs = [x for x in funcs if self.matches(x)]
if not funcs:
return
except Exception:
reporter.entering_filename(filename, len(funcs))
raise
reporter.entering_filename(filename, len(funcs))
if not sort:
random.shuffle(funcs)
for f in funcs:
start = time.time()
reporter.entering_test(f)
try:
if getattr(f, '_slow', False) and not slow:
raise Skipped("Slow")
if timeout:
self._timeout(f, timeout)
else:
random.seed(self._seed)
f()
except KeyboardInterrupt:
if getattr(f, '_slow', False):
reporter.test_skip("KeyboardInterrupt")
else:
raise
except Exception:
if timeout:
signal.alarm(0) # Disable the alarm. It could not be handled before.
t, v, tr = sys.exc_info()
if t is AssertionError:
reporter.test_fail((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
elif t.__name__ == "Skipped":
reporter.test_skip(v)
elif t.__name__ == "XFail":
reporter.test_xfail()
elif t.__name__ == "XPass":
reporter.test_xpass(v)
else:
reporter.test_exception((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
else:
reporter.test_pass()
taken = time.time() - start
if taken > self._slow_threshold:
reporter.slow_test_functions.append((f.__name__, taken))
if getattr(f, '_slow', False) and slow:
if taken < self._fast_threshold:
reporter.fast_test_functions.append((f.__name__, taken))
reporter.leaving_filename()
def _timeout(self, function, timeout):
def callback(x, y):
signal.alarm(0)
raise Skipped("Timeout")
signal.signal(signal.SIGALRM, callback)
signal.alarm(timeout) # Set an alarm with a given timeout
function()
signal.alarm(0) # Disable the alarm
def matches(self, x):
"""
Does the keyword expression self._kw match "x"? Returns True/False.
Always returns True if self._kw is "".
"""
if not self._kw:
return True
for kw in self._kw:
if x.__name__.find(kw) != -1:
return True
return False
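# Illustrative only: with self._kw == ("subs",) a function named
# "test_subs_simple" matches (substring hit) while "test_series" does not,
# so only the former survives the keyword filtering done in test_file above.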
def get_test_files(self, dir, pat='test_*.py'):
"""
Returns the list of test_*.py (default) files at or below directory
``dir`` relative to the sympy home directory.
"""
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files if fnmatch(f, pat)])
return sorted([sys_normcase(gi) for gi in g])
class SymPyDocTests(object):
def __init__(self, reporter, normal):
self._count = 0
self._root_dir = sympy_dir
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._normal = normal
self._testfiles = []
def test(self):
"""
Runs the tests and returns True if all tests pass, otherwise False.
"""
self._reporter.start()
for f in self._testfiles:
try:
self.test_file(f)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def test_file(self, filename):
clear_cache()
from sympy.core.compatibility import StringIO
rel_name = filename[len(self._root_dir) + 1:]
dirname, file = os.path.split(filename)
module = rel_name.replace(os.sep, '.')[:-3]
if rel_name.startswith("examples"):
# Examples files do not have __init__.py files,
# So we have to temporarily extend sys.path to import them
sys.path.insert(0, dirname)
module = file[:-3] # remove ".py"
setup_pprint()
try:
module = pdoctest._normalize_module(module)
tests = SymPyDocTestFinder().find(module)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
self._reporter.import_error(filename, sys.exc_info())
return
finally:
if rel_name.startswith("examples"):
del sys.path[0]
tests = [test for test in tests if len(test.examples) > 0]
# By default tests are sorted alphabetically by function name.
# We sort by line number so one can edit the file sequentially from
# bottom to top. However, if there are decorated functions, their line
# numbers will be too large and for now one must just search for these
# by text and function name.
tests.sort(key=lambda x: -x.lineno)
if not tests:
return
self._reporter.entering_filename(filename, len(tests))
for test in tests:
assert len(test.examples) != 0
# check if there are external dependencies which need to be met
if '_doctest_depends_on' in test.globs:
if not self._process_dependencies(test.globs['_doctest_depends_on']):
self._reporter.test_skip()
continue
runner = SymPyDocTestRunner(optionflags=pdoctest.ELLIPSIS |
pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
runner._checker = SymPyOutputChecker()
old = sys.stdout
new = StringIO()
sys.stdout = new
# If the testing is normal, the doctests get importing magic to
# provide the global namespace. If not normal (the default), they
# must run on their own; all imports must be explicit within
# a function's docstring. Once imported that import will be
# available to the rest of the tests in a given function's
# docstring (unless clear_globs=True below).
if not self._normal:
test.globs = {}
# if this is uncommented then all the test would get is what
# comes by default with a "from sympy import *"
#exec('from sympy import *') in test.globs
test.globs['print_function'] = print_function
try:
f, t = runner.run(test, compileflags=future_flags,
out=new.write, clear_globs=False)
except KeyboardInterrupt:
raise
finally:
sys.stdout = old
if f > 0:
self._reporter.doctest_fail(test.name, new.getvalue())
else:
self._reporter.test_pass()
self._reporter.leaving_filename()
def get_test_files(self, dir, pat='*.py', init_only=True):
"""
Returns the list of \*.py files (default) from which docstrings
will be tested which are at or below directory ``dir``. By default,
only those that have an __init__.py in their parent directory
and do not start with ``test_`` will be included.
"""
def importable(x):
"""
Checks if given pathname x is an importable module by checking for
__init__.py file.
Returns True/False.
Currently we only test if the __init__.py file exists in the
directory with the file "x" (in theory we should also test all the
parent dirs).
"""
init_py = os.path.join(os.path.dirname(x), "__init__.py")
return os.path.exists(init_py)
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files
if not f.startswith('test_') and fnmatch(f, pat)])
if init_only:
# skip files that are not importable (i.e. missing __init__.py)
g = [x for x in g if importable(x)]
return [sys_normcase(gi) for gi in g]
def _process_dependencies(self, deps):
"""
Returns ``False`` if some dependencies are not met and the test should be
skipped, otherwise it returns ``True``.
"""
executables = deps.get('exe', None)
moduledeps = deps.get('modules', None)
viewers = deps.get('disable_viewers', None)
pyglet = deps.get('pyglet', None)
# print deps
if executables is not None:
for ex in executables:
found = find_executable(ex)
if found is None:
return False
if moduledeps is not None:
for extmod in moduledeps:
if extmod == 'matplotlib':
matplotlib = import_module(
'matplotlib',
__import__kwargs={'fromlist':
['pyplot', 'cm', 'collections']},
min_module_version='1.0.0', catch=(RuntimeError,))
if matplotlib is not None:
pass
else:
return False
else:
# TODO min version support
mod = import_module(extmod)
if mod is not None:
version = "unknown"
if hasattr(mod, '__version__'):
version = mod.__version__
else:
return False
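# The 'disable_viewers' handling below writes stub executables with the
# requested viewer names into a temporary directory that is prepended to
# PATH, so doctests that would normally launch an external viewer hit the
# stub instead and nothing is opened on screen.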
if viewers is not None:
import tempfile
tempdir = tempfile.mkdtemp()
os.environ['PATH'] = '%s:%s' % (tempdir, os.environ['PATH'])
if PY3:
vw = '#!/usr/bin/env python3\n' \
'import sys\n' \
'if len(sys.argv) <= 1:\n' \
' exit("wrong number of args")\n'
else:
vw = '#!/usr/bin/env python\n' \
'import sys\n' \
'if len(sys.argv) <= 1:\n' \
' exit("wrong number of args")\n'
for viewer in viewers:
with open(os.path.join(tempdir, viewer), 'w') as fh:
fh.write(vw)
# make the file executable
os.chmod(os.path.join(tempdir, viewer),
stat.S_IREAD | stat.S_IWRITE | stat.S_IXUSR)
if pyglet:
# monkey-patch pyglet s.t. it does not open a window during
# doctesting
import pyglet
class DummyWindow(object):
def __init__(self, *args, **kwargs):
self.has_exit=True
self.width = 600
self.height = 400
def set_vsync(self, x):
pass
def switch_to(self):
pass
def push_handlers(self, x):
pass
def close(self):
pass
pyglet.window.Window = DummyWindow
return True
class SymPyDocTestFinder(DocTestFinder):
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
Modified from doctest's version by looking harder for code in the
    case that it looks like the code comes from a different module.
    In the case of decorated functions (e.g. @vectorize) they appear
    to come from a different module (e.g. multidimensional) even though
their code is not there.
"""
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to ``tests``.
"""
if self._verbose:
print('Finding tests in %s' % name)
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Make sure we don't run doctests for classes outside of sympy, such
# as in numpy or scipy.
if inspect.isclass(obj):
if obj.__module__.split('.')[0] != 'sympy':
return
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
if not self._recurse:
return
# Look for tests in a module's contained objects.
if inspect.ismodule(obj):
for rawname, val in obj.__dict__.items():
# Recurse to functions & classes.
if inspect.isfunction(val) or inspect.isclass(val):
# Make sure we don't run doctests functions or classes
# from different modules
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (rawname %s)" % (val, module, rawname)
try:
valname = '%s.%s' % (name, rawname)
self._find(tests, val, valname, module,
source_lines, globs, seen)
except KeyboardInterrupt:
raise
# Look for tests in a module's __test__ dictionary.
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, string_types):
raise ValueError("SymPyDocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, string_types)):
raise ValueError("SymPyDocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj):
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if (inspect.isfunction(val) or
inspect.isclass(val) or
isinstance(val, property)):
# Make sure we don't run doctests functions or classes
# from different modules
if isinstance(val, property):
if hasattr(val.fget, '__module__'):
if val.fget.__module__ != module.__name__:
continue
else:
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (valname %s)" % (
val, module, valname)
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
lineno = None
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, string_types):
# obj is a string in the case for objects in the polys package.
# Note that source_lines is a binary string (compiled polys
# modules), which can't be handled by _find_lineno so determine
# the line number here.
docstring = obj
matches = re.findall("line \d+", name)
assert len(matches) == 1, \
"string '%s' does not contain lineno " % name
            # NOTE: this is not the exact line number but it's better than no
            # lineno ;)
lineno = int(matches[0][5:])
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, string_types):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# check that properties have a docstring because _find_lineno
# assumes it
if isinstance(obj, property):
if obj.fget.__doc__ is None:
return None
# Find the docstring's location in the file.
if lineno is None:
# handling of properties is not implemented in _find_lineno so do
# it here
if hasattr(obj, 'func_closure') and obj.func_closure is not None:
tobj = obj.func_closure[0].cell_contents
elif isinstance(obj, property):
tobj = obj.fget
else:
tobj = obj
lineno = self._find_lineno(tobj, source_lines)
if lineno is None:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
if hasattr(obj, '_doctest_depends_on'):
globs['_doctest_depends_on'] = obj._doctest_depends_on
else:
globs['_doctest_depends_on'] = {}
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
class SymPyDocTestRunner(DocTestRunner):
"""
A class used to run DocTest test cases, and accumulate statistics.
The ``run`` method is used to process a single DocTest case. It
returns a tuple ``(f, t)``, where ``t`` is the number of test cases
tried, and ``f`` is the number of test cases that failed.
Modified from the doctest version to not reset the sys.displayhook (see
issue 5140).
See the docstring of the original DocTestRunner for more information.
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in ``test``, and display the results using the
writer function ``out``.
The examples are run in the namespace ``test.globs``. If
``clear_globs`` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use ``clear_globs=False``.
``compileflags`` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to ``globs``.
The output of each example is checked using
``SymPyDocTestRunner.check_output``, and the results are
formatted by the ``SymPyDocTestRunner.report_*`` methods.
"""
self.test = test
if compileflags is None:
compileflags = pdoctest._extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = pdoctest._OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = pdoctest.linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
test.globs['print_function'] = print_function
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
# We have to override the name mangled methods.
SymPyDocTestRunner._SymPyDocTestRunner__patched_linecache_getlines = \
DocTestRunner._DocTestRunner__patched_linecache_getlines
SymPyDocTestRunner._SymPyDocTestRunner__run = DocTestRunner._DocTestRunner__run
SymPyDocTestRunner._SymPyDocTestRunner__record_outcome = \
DocTestRunner._DocTestRunner__record_outcome
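# Illustrative sketch (not part of the original module): one way a single
# DocTest built with the standard-library parser could be pushed through
# SymPyDocTestRunner.run.  The docstring and the discarded ``out`` callback
# below are made up purely for the example.
def _example_run_single_doctest():
    docstring = ">>> 1 + 1\n2\n"
    test = pdoctest.DocTestParser().get_doctest(
        docstring, {}, "example", "<example>", 0)
    runner = SymPyDocTestRunner(verbose=False)
    failed, tried = runner.run(test, out=lambda text: None)
    return failed, tried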
class SymPyOutputChecker(pdoctest.OutputChecker):
"""
    Compared to the OutputChecker from the stdlib, our OutputChecker class
    supports numerical comparison of floats occurring in the output of the
doctest examples
"""
def __init__(self):
# NOTE OutputChecker is an old-style class with no __init__ method,
# so we can't call the base class version of __init__ here
got_floats = r'(\d+\.\d*|\.\d+)'
# floats in the 'want' string may contain ellipses
want_floats = got_floats + r'(\.{3})?'
front_sep = r'\s|\+|\-|\*|,'
back_sep = front_sep + r'|j|e'
fbeg = r'^%s(?=%s|$)' % (got_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, got_floats, back_sep)
self.num_got_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
fbeg = r'^%s(?=%s|$)' % (want_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, want_floats, back_sep)
self.num_want_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# TODO parse integers as well ?
# Parse floats and compare them. If some of the parsed floats contain
# ellipses, skip the comparison.
matches = self.num_got_rgx.finditer(got)
numbers_got = [match.group(1) for match in matches] # list of strs
matches = self.num_want_rgx.finditer(want)
numbers_want = [match.group(1) for match in matches] # list of strs
if len(numbers_got) != len(numbers_want):
return False
if len(numbers_got) > 0:
nw_ = []
for ng, nw in zip(numbers_got, numbers_want):
if '...' in nw:
nw_.append(ng)
continue
else:
nw_.append(nw)
if abs(float(ng)-float(nw)) > 1e-5:
return False
got = self.num_got_rgx.sub(r'%s', got)
got = got % tuple(nw_)
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub('(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub('(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & pdoctest.NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & pdoctest.ELLIPSIS:
if pdoctest._ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
class Reporter(object):
"""
Parent class for all reporters.
"""
pass
class PyTestReporter(Reporter):
"""
Py.test like reporter. Should produce output identical to py.test.
"""
def __init__(self, verbose=False, tb="short", colors=True,
force_colors=False, split=None):
self._verbose = verbose
self._tb_style = tb
self._colors = colors
self._force_colors = force_colors
self._xfailed = 0
self._xpassed = []
self._failed = []
self._failed_doctest = []
self._passed = 0
self._skipped = 0
self._exceptions = []
self._terminal_width = None
self._default_width = 80
self._split = split
# TODO: Should these be protected?
self.slow_test_functions = []
self.fast_test_functions = []
# this tracks the x-position of the cursor (useful for positioning
# things on the screen), without the need for any readline library:
self._write_pos = 0
self._line_wrap = False
def root_dir(self, dir):
self._root_dir = dir
@property
def terminal_width(self):
if self._terminal_width is not None:
return self._terminal_width
def findout_terminal_width():
if sys.platform == "win32":
# Windows support is based on:
#
# http://code.activestate.com/recipes/
# 440694-determine-size-of-console-window-on-windows/
from ctypes import windll, create_string_buffer
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(_, _, _, _, _, left, _, right, _, _, _) = \
struct.unpack("hhhhHhhhhhh", csbi.raw)
return right - left
else:
return self._default_width
if hasattr(sys.stdout, 'isatty') and not sys.stdout.isatty():
return self._default_width # leave PIPEs alone
try:
process = subprocess.Popen(['stty', '-a'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = process.stdout.read()
if PY3:
stdout = stdout.decode("utf-8")
except (OSError, IOError):
pass
else:
# We support the following output formats from stty:
#
# 1) Linux -> columns 80
# 2) OS X -> 80 columns
# 3) Solaris -> columns = 80
re_linux = r"columns\s+(?P<columns>\d+);"
re_osx = r"(?P<columns>\d+)\s*columns;"
re_solaris = r"columns\s+=\s+(?P<columns>\d+);"
for regex in (re_linux, re_osx, re_solaris):
match = re.search(regex, stdout)
if match is not None:
columns = match.group('columns')
try:
width = int(columns)
except ValueError:
pass
if width != 0:
return width
return self._default_width
width = findout_terminal_width()
self._terminal_width = width
return width
def write(self, text, color="", align="left", width=None,
force_colors=False):
"""
Prints a text on the screen.
It uses sys.stdout.write(), so no readline library is necessary.
Parameters
==========
color : choose from the colors below, "" means default color
align : "left"/"right", "left" is a normal print, "right" is aligned on
the right-hand side of the screen, filled with spaces if
necessary
width : the screen width
"""
color_templates = (
("Black", "0;30"),
("Red", "0;31"),
("Green", "0;32"),
("Brown", "0;33"),
("Blue", "0;34"),
("Purple", "0;35"),
("Cyan", "0;36"),
("LightGray", "0;37"),
("DarkGray", "1;30"),
("LightRed", "1;31"),
("LightGreen", "1;32"),
("Yellow", "1;33"),
("LightBlue", "1;34"),
("LightPurple", "1;35"),
("LightCyan", "1;36"),
("White", "1;37"),
)
colors = {}
for name, value in color_templates:
colors[name] = value
c_normal = '\033[0m'
c_color = '\033[%sm'
if width is None:
width = self.terminal_width
if align == "right":
if self._write_pos + len(text) > width:
# we don't fit on the current line, create a new line
self.write("\n")
self.write(" "*(width - self._write_pos - len(text)))
if not self._force_colors and hasattr(sys.stdout, 'isatty') and not \
sys.stdout.isatty():
# the stdout is not a terminal, this for example happens if the
# output is piped to less, e.g. "bin/test | less". In this case,
# the terminal control sequences would be printed verbatim, so
# don't use any colors.
color = ""
elif sys.platform == "win32":
# Windows consoles don't support ANSI escape sequences
color = ""
elif not self._colors:
color = ""
if self._line_wrap:
if text[0] != "\n":
sys.stdout.write("\n")
# Avoid UnicodeEncodeError when printing out test failures
if PY3 and IS_WINDOWS:
text = text.encode('raw_unicode_escape').decode('utf8', 'ignore')
elif PY3 and not sys.stdout.encoding.lower().startswith('utf'):
text = text.encode(sys.stdout.encoding, 'backslashreplace'
).decode(sys.stdout.encoding)
if color == "":
sys.stdout.write(text)
else:
sys.stdout.write("%s%s%s" %
(c_color % colors[color], text, c_normal))
sys.stdout.flush()
l = text.rfind("\n")
if l == -1:
self._write_pos += len(text)
else:
self._write_pos = len(text) - l - 1
self._line_wrap = self._write_pos >= width
self._write_pos %= width
def write_center(self, text, delim="="):
width = self.terminal_width
if text != "":
text = " %s " % text
idx = (width - len(text)) // 2
t = delim*idx + text + delim*(width - idx - len(text))
self.write(t + "\n")
def write_exception(self, e, val, tb):
t = traceback.extract_tb(tb)
# remove the first item, as that is always runtests.py
t = t[1:]
t = traceback.format_list(t)
self.write("".join(t))
t = traceback.format_exception_only(e, val)
self.write("".join(t))
def start(self, seed=None, msg="test process starts"):
self.write_center(msg)
executable = sys.executable
v = tuple(sys.version_info)
python_version = "%s.%s.%s-%s-%s" % v
implementation = platform.python_implementation()
if implementation == 'PyPy':
implementation += " %s.%s.%s-%s-%s" % sys.pypy_version_info
self.write("executable: %s (%s) [%s]\n" %
(executable, python_version, implementation))
from .misc import ARCH
self.write("architecture: %s\n" % ARCH)
from sympy.core.cache import USE_CACHE
self.write("cache: %s\n" % USE_CACHE)
from sympy.core.compatibility import GROUND_TYPES, HAS_GMPY
version = ''
if GROUND_TYPES =='gmpy':
if HAS_GMPY == 1:
import gmpy
elif HAS_GMPY == 2:
import gmpy2 as gmpy
version = gmpy.version()
self.write("ground types: %s %s\n" % (GROUND_TYPES, version))
if seed is not None:
self.write("random seed: %d\n" % seed)
from .misc import HASH_RANDOMIZATION
self.write("hash randomization: ")
hash_seed = os.getenv("PYTHONHASHSEED") or '0'
if HASH_RANDOMIZATION and (hash_seed == "random" or int(hash_seed)):
self.write("on (PYTHONHASHSEED=%s)\n" % hash_seed)
else:
self.write("off\n")
if self._split:
self.write("split: %s\n" % self._split)
self.write('\n')
self._t_start = clock()
def finish(self):
self._t_end = clock()
self.write("\n")
global text, linelen
text = "tests finished: %d passed, " % self._passed
linelen = len(text)
def add_text(mytext):
global text, linelen
"""Break new text if too long."""
if linelen + len(mytext) > self.terminal_width:
text += '\n'
linelen = 0
text += mytext
linelen += len(mytext)
if len(self._failed) > 0:
add_text("%d failed, " % len(self._failed))
if len(self._failed_doctest) > 0:
add_text("%d failed, " % len(self._failed_doctest))
if self._skipped > 0:
add_text("%d skipped, " % self._skipped)
if self._xfailed > 0:
add_text("%d expected to fail, " % self._xfailed)
if len(self._xpassed) > 0:
add_text("%d expected to fail but passed, " % len(self._xpassed))
if len(self._exceptions) > 0:
add_text("%d exceptions, " % len(self._exceptions))
add_text("in %.2f seconds" % (self._t_end - self._t_start))
if self.slow_test_functions:
self.write_center('slowest tests', '_')
sorted_slow = sorted(self.slow_test_functions, key=lambda r: r[1])
for slow_func_name, taken in sorted_slow:
print('%s - Took %.3f seconds' % (slow_func_name, taken))
if self.fast_test_functions:
self.write_center('unexpectedly fast tests', '_')
sorted_fast = sorted(self.fast_test_functions,
key=lambda r: r[1])
for fast_func_name, taken in sorted_fast:
print('%s - Took %.3f seconds' % (fast_func_name, taken))
if len(self._xpassed) > 0:
self.write_center("xpassed tests", "_")
for e in self._xpassed:
self.write("%s: %s\n" % (e[0], e[1]))
self.write("\n")
if self._tb_style != "no" and len(self._exceptions) > 0:
for e in self._exceptions:
filename, f, (t, val, tb) = e
self.write_center("", "_")
if f is None:
s = "%s" % filename
else:
s = "%s:%s" % (filename, f.__name__)
self.write_center(s, "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed) > 0:
for e in self._failed:
filename, f, (t, val, tb) = e
self.write_center("", "_")
self.write_center("%s:%s" % (filename, f.__name__), "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed_doctest) > 0:
for e in self._failed_doctest:
filename, msg = e
self.write_center("", "_")
self.write_center("%s" % filename, "_")
self.write(msg)
self.write("\n")
self.write_center(text)
ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \
len(self._failed_doctest) == 0
if not ok:
self.write("DO *NOT* COMMIT!\n")
return ok
def entering_filename(self, filename, n):
rel_name = filename[len(self._root_dir) + 1:]
self._active_file = rel_name
self._active_file_error = False
self.write(rel_name)
self.write("[%d] " % n)
def leaving_filename(self):
self.write(" ")
if self._active_file_error:
self.write("[FAIL]", "Red", align="right")
else:
self.write("[OK]", "Green", align="right")
self.write("\n")
if self._verbose:
self.write("\n")
def entering_test(self, f):
self._active_f = f
if self._verbose:
self.write("\n" + f.__name__ + " ")
def test_xfail(self):
self._xfailed += 1
self.write("f", "Green")
def test_xpass(self, v):
message = str(v)
self._xpassed.append((self._active_file, message))
self.write("X", "Green")
def test_fail(self, exc_info):
self._failed.append((self._active_file, self._active_f, exc_info))
self.write("F", "Red")
self._active_file_error = True
def doctest_fail(self, name, error_msg):
# the first line contains "******", remove it:
error_msg = "\n".join(error_msg.split("\n")[1:])
self._failed_doctest.append((name, error_msg))
self.write("F", "Red")
self._active_file_error = True
def test_pass(self, char="."):
self._passed += 1
if self._verbose:
self.write("ok", "Green")
else:
self.write(char, "Green")
def test_skip(self, v=None):
char = "s"
self._skipped += 1
if v is not None:
message = str(v)
if message == "KeyboardInterrupt":
char = "K"
elif message == "Timeout":
char = "T"
elif message == "Slow":
char = "w"
self.write(char, "Blue")
if self._verbose:
self.write(" - ", "Blue")
if v is not None:
self.write(message, "Blue")
def test_exception(self, exc_info):
self._exceptions.append((self._active_file, self._active_f, exc_info))
self.write("E", "Red")
self._active_file_error = True
def import_error(self, filename, exc_info):
self._exceptions.append((filename, None, exc_info))
rel_name = filename[len(self._root_dir) + 1:]
self.write(rel_name)
self.write("[?] Failed to import", "Red")
self.write(" ")
self.write("[FAIL]", "Red", align="right")
self.write("\n")
sympy_dir = get_sympy_dir()
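def _demo_output_checker():
    # Illustrative sketch (not part of the original module): SymPyOutputChecker
    # treats floats that agree to within 1e-5 as matching doctest output,
    # while clearly different floats still fail the comparison.
    checker = SymPyOutputChecker()
    assert checker.check_output("0.33333333", "0.33333334", 0)
    assert not checker.check_output("0.5", "0.6", 0)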
|
bsd-3-clause
|
kazemakase/scikit-learn
|
examples/decomposition/plot_kernel_pca.py
|
353
|
2011
|
"""
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
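# Illustrative extension (not part of the original example): a rough numeric
# check of the claim above.  A linear classifier fit on the first kernel PCA
# component should separate the two circles far better than one fit on the raw
# coordinates or on the linear PCA projection.
from sklearn.linear_model import LogisticRegression
for name, Z in [("original", X), ("PCA", X_pca[:, :2]), ("KPCA", X_kpca[:, :1])]:
    clf = LogisticRegression().fit(Z, y)
    print("%s space: training accuracy %.2f" % (name, clf.score(Z, y)))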
|
bsd-3-clause
|
AIML/scikit-learn
|
sklearn/tests/test_base.py
|
216
|
7045
|
# Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
|
bsd-3-clause
|
daeilkim/refinery
|
refinery/bnpy/bnpy-dev/bnpy/viz/GaussViz.py
|
1
|
5149
|
'''
GaussViz.py
Visualizing 2D covariance matrix of learned Gaussian mixture models
'''
import numpy as np
from matplotlib import pylab
Colors = [ (1,0,0), (1,0,1), (0,1,0), (0,1,1), (0,0,1), (1,0.6,0)]
def plotGauss2DFromHModel(hmodel, compListToPlot=None, compsToHighlight=None, wTHR=0.01, Colors=Colors):
''' Plot 2D contours for components in hmodel in current pylab figure
Args
-------
hmodel : bnpy HModel object
compListToPlot : array-like of integer IDs of components within hmodel
compsToHighlight : int or array-like of integer IDs to highlight
if None, all components get unique colors
if not None, only highlighted components get colors.
wTHR : float threshold on minimum weight assigned to component before it is "plottable"
'''
if compsToHighlight is not None:
compsToHighlight = np.asarray(compsToHighlight)
if compsToHighlight.ndim == 0:
compsToHighlight = np.asarray([compsToHighlight])
else:
compsToHighlight = list()
if compListToPlot is None:
compListToPlot = np.arange(0, hmodel.allocModel.K)
try:
w = np.exp(hmodel.allocModel.Elogw)
except Exception:
w = hmodel.allocModel.w
colorID = 0
for kk in compListToPlot:
mu = hmodel.obsModel.get_mean_for_comp(kk)
Sigma = hmodel.obsModel.get_covar_mat_for_comp(kk)
if w[kk] < wTHR and kk not in compsToHighlight:
continue
if kk in compsToHighlight or len(compsToHighlight) == 0:
if mu.size == 1:
plotGauss1D(mu, Sigma, color=Colors[colorID])
else:
plotGauss2DContour(mu, Sigma, color=Colors[colorID])
colorID = (colorID + 1) % len(Colors)
elif kk not in compsToHighlight:
if mu.size == 1:
plotGauss1D(mu, Sigma, color='k')
else:
plotGauss2DContour(mu, Sigma, color='k')
if mu.size > 1:
pylab.axis('image')
def plotGauss2DContour(mu, Sigma, color='b', radiusLengths=[0.5, 1.25, 2]):
''' Plot elliptical contours for first 2 dims of covariance matrix Sigma,
location specified by corresponding dims from vector mu
'''
mu = np.asarray(mu)
Sigma = np.asarray(Sigma)
mu = mu[ :2]
Sigma = Sigma[ :2, :2]
D,V = np.linalg.eig( Sigma )
sqrtSigma = np.dot( V, np.sqrt(np.diag(D)) )
# Prep for plotting elliptical contours
# by creating grid of (x,y) points along perfect circle
ts = np.arange( -np.pi, np.pi, 0.01 )
x = np.sin(ts)
y = np.cos(ts)
Zcirc = np.vstack([x, y])
# Warp circle into ellipse defined by Sigma's eigenvectors
Zellipse = np.dot( sqrtSigma, Zcirc )
# plot contour lines across several radius lengths
# TODO: instead, choose radius by percentage of prob mass contained within
for r in radiusLengths:
Z = r * Zellipse + mu[:,np.newaxis]
pylab.plot(Z[0], Z[1], '.', markerfacecolor=color, markeredgecolor=color)
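def _demoGauss2DContour():
    ''' Illustrative sketch (not part of the original module): call the contour
        routine above on a made-up correlated 2D Gaussian.
    '''
    mu = np.asarray([0.0, 0.0])
    Sigma = np.asarray([[2.0, 0.8], [0.8, 1.0]])
    plotGauss2DContour(mu, Sigma, color='r')
    pylab.axis('image')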
def plotGauss1D(mu, sigma2, color='b'):
mu = np.squeeze(mu)
sigma = np.sqrt(np.squeeze(sigma2))
assert mu.size == 1 and mu.ndim == 0
assert sigma.size == 1 and sigma.ndim == 0
xs = mu + sigma * np.arange( -4, 4, 0.01)
ps = 1./np.sqrt(2*np.pi) * 1./sigma * np.exp( -0.5 * (xs-mu)**2 / sigma**2 )
pylab.plot( xs, ps, '.', markerfacecolor=color, markeredgecolor=color)
########################################################### Plot Covar Matrix
###########################################################
def plotCovMatFromHModel(hmodel, compListToPlot=None, compsToHighlight=None, wTHR=0.001):
''' Plot cov matrix visualization for each "significant" component in hmodel
Args
-------
hmodel : bnpy HModel object
compListToPlot : array-like of integer IDs of components within hmodel
compsToHighlight : int or array-like of integer IDs to highlight
if None, all components get unique colors
if not None, only highlighted components get colors.
    wTHR : float threshold on minimum weight assigned to a component for it to be "plottable"
'''
if compsToHighlight is not None:
compsToHighlight = np.asarray(compsToHighlight)
if compsToHighlight.ndim == 0:
compsToHighlight = np.asarray([compsToHighlight])
else:
compsToHighlight = list()
if compListToPlot is None:
compListToPlot = np.arange(0, hmodel.allocModel.K)
try:
w = np.exp(hmodel.allocModel.Elogw)
except Exception:
w = hmodel.allocModel.w
nRow = 2
nCol = np.ceil(hmodel.obsModel.K/2.0)
colorID = 0
for plotID, kk in enumerate(compListToPlot):
if w[kk] < wTHR and kk not in compsToHighlight:
Sigma = getEmptyCompSigmaImage(hmodel.obsModel.D)
clim = [0, 1]
else:
Sigma = hmodel.obsModel.get_covar_mat_for_comp(kk)
clim = [-.25, 1]
pylab.subplot(nRow, nCol, plotID)
pylab.imshow(Sigma, interpolation='nearest', cmap='hot', clim=clim)
pylab.xticks([])
pylab.yticks([])
pylab.xlabel('%.2f' % (w[kk]))
if kk in compsToHighlight:
pylab.xlabel('***')
def getEmptyCompSigmaImage(D):
EmptySig = np.eye(D)
for dd in range(D):
EmptySig[dd, D - 1 - dd] = 1.0
return EmptySig
|
mit
|
amolkahat/pandas
|
pandas/tests/indexes/multi/test_names.py
|
4
|
4081
|
# -*- coding: utf-8 -*-
import pandas as pd
import pandas.util.testing as tm
from pandas import MultiIndex
def check_level_names(index, names):
assert [level.name for level in index.levels] == list(names)
def test_slice_keep_name():
x = MultiIndex.from_tuples([('a', 'b'), (1, 2), ('c', 'd')],
names=['x', 'y'])
assert x[1:].names == x.names
def test_index_name_retained():
# GH9857
result = pd.DataFrame({'x': [1, 2, 6],
'y': [2, 2, 8],
'z': [-5, 0, 5]})
result = result.set_index('z')
result.loc[10] = [9, 10]
df_expected = pd.DataFrame({'x': [1, 2, 6, 9],
'y': [2, 2, 8, 10],
'z': [-5, 0, 5, 10]})
df_expected = df_expected.set_index('z')
tm.assert_frame_equal(result, df_expected)
def test_changing_names(idx):
# names should be applied to levels
level_names = [level.name for level in idx.levels]
check_level_names(idx, idx.names)
view = idx.view()
copy = idx.copy()
shallow_copy = idx._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in idx.names]
idx.names = new_names
check_level_names(idx, new_names)
# but not on copies
check_level_names(view, level_names)
check_level_names(copy, level_names)
check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
check_level_names(idx, new_names)
def test_take_preserve_name(idx):
taken = idx.take([3, 0, 1])
assert taken.names == idx.names
def test_copy_names():
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(idx, index_names):
# names are assigned in setup
names = index_names
level_names = [level.name for level in idx.levels]
assert names == level_names
# setting bad names on existing
index = idx
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = idx.levels
major_labels, minor_labels = idx.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_duplicate_level_names_access_raises(idx):
# GH19029
idx.names = ['foo', 'foo']
tm.assert_raises_regex(ValueError, 'name foo occurs multiple times',
idx._get_level_number, 'foo')
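def test_set_names_returns_new_index_illustration():
    # Illustrative addition (not from the original module): set_names without
    # inplace=True returns a renamed copy and leaves the original untouched.
    mi = MultiIndex.from_tuples([('a', 1), ('b', 2)], names=['x', 'y'])
    renamed = mi.set_names(['u', 'v'])
    assert renamed.names == ['u', 'v']
    assert mi.names == ['x', 'y']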
|
bsd-3-clause
|
henryroe/xatmos
|
setup.py
|
1
|
1498
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
readme = open('README.md').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='xatmos',
version='0.1.0',
    description='A GUI viewer for looking at telluric transmission, TEXES orders, and identifying absorption lines.',
long_description=readme + '\n\n' + history,
author='Henry Roe',
author_email='[email protected]',
url='https://github.com/henryroe/xatmos',
packages=[
'xatmos',
],
package_dir={'xatmos': 'xatmos'},
package_data = {'xatmos': ['atmos.txt.gz', 'hitran*txt.gz', 'orders.txt']},
include_package_data=True,
install_requires=['numpy>=1.6.1',
'pandas',
'pyface',
'traits',
'traitsui',
'matplotlib'
],
license='LICENSE.txt',
zip_safe=False,
keywords='xatmos',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
)
|
mit
|
tpetmanson/pfe
|
pypfe/cluster.py
|
2
|
2929
|
# -*- coding: utf-8 -*-
# Python 2.7
# Pattern based fact extraction library.
# Copyright (C) 2013 University of Tartu
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics.pairwise import linear_kernel
from sklearn.cluster import KMeans
import scipy.cluster.hierarchy as sch
import numpy as np
import tempfile
import os.path
import pylab
import matplotlib.pyplot as plt
import sys
def tfidf_matrix(X, **kwargs):
# get the tf-idf counts
count_vect = CountVectorizer(**kwargs)
counts = count_vect.fit_transform(X)
tf_transformer = TfidfTransformer().fit(counts)
counts_tfidf = tf_transformer.transform(counts)
# compute cosine similarity
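    # (TfidfTransformer L2-normalises each row by default, so the linear kernel
    # of the tf-idf matrix with itself is exactly the pairwise cosine
    # similarity between sentences.)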
matrix = linear_kernel(counts_tfidf, counts_tfidf)
return matrix
def hierarchial_sentences(X, **kwargs):
    '''Perform hierarchical clustering on a vector of sentences.'''
matrix = tfidf_matrix(X, **kwargs)
# hierarchial clustering
linkage = sch.linkage(matrix, method = 'complete')
cutoff = kwargs.get('cutoff_coef', 0.45)*max(linkage[:,2])
# create the plot
fig = pylab.figure()
axdendro = fig.add_axes([0.09,0.1,0.2,0.8])
denodrogram = sch.dendrogram(linkage, orientation='right', color_threshold=cutoff)
axdendro.set_xticks([])
axdendro.set_yticks([])
# extract the indices
indices = denodrogram['leaves']
matrix = matrix[indices,:]
matrix = matrix[:,indices]
axmatrix = fig.add_axes([0.3,0.1,0.6,0.8])
im = axmatrix.matshow(matrix, aspect='auto', origin='lower')
axmatrix.set_xticks([])
axmatrix.set_yticks([])
axcolor = fig.add_axes([0.91,0.1,0.02,0.8])
pylab.colorbar(im, cax=axcolor)
# flatten the clusters
flat_clusters = sch.fcluster(linkage, cutoff, 'distance')
leaders = sch.leaders(linkage, flat_clusters)
return {'fig': fig, 'flat': flat_clusters, 'leaders': leaders[1]}
def kmeans_sentences(X, n_clusters, **kwargs):
    '''Cluster sentences using k-means.
    Keyword arguments are forwarded to CountVectorizer.'''
matrix = tfidf_matrix(X, **kwargs)
kmeans = KMeans(n_clusters=n_clusters)
kmeans.fit(matrix)
return kmeans.predict(matrix)
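def _demo_kmeans_sentences():
    '''Illustrative usage sketch (not part of the original module): cluster a
    handful of made-up sentences into two groups with kmeans_sentences.'''
    sentences = ['the cat sat on the mat',
                 'a cat and a dog played',
                 'stock prices fell sharply',
                 'markets and stock prices rallied']
    return kmeans_sentences(sentences, n_clusters=2)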
|
gpl-3.0
|
wanggang3333/scikit-learn
|
sklearn/linear_model/tests/test_sgd.py
|
129
|
43401
|
import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
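# The two wrapper classes below convert their inputs to CSR matrices before
# delegating to the dense SGD estimators, so every test written against the
# dense API is also exercised on sparse input.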
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
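    # With squared loss the per-sample update implemented below is
    #   p = x . w + b
    #   w <- (1 - eta * alpha) * w - eta * (p - y) * x
    #   b <- b - eta * (p - y) * decay
    # and the returned "average" weights/intercept are the running means of w
    # and b over all updates seen so far.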
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
        # partial_fit with class_weight='balanced' is not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
            clf = self.factory(loss=loss, alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1 / 3.] * 3)
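        # For context, SGDClassifier derives modified_huber probabilities
        # from the decision values roughly as follows (hedged sketch, not the
        # exact implementation):
        #
        #   scores = (np.clip(d, -1, 1) + 1.) / 2.    # map [-1, 1] -> [0, 1]
        #   norm = scores.sum(axis=1)
        #   scores[norm == 0] = 1. / scores.shape[1]  # all-zero row -> uniform
        #   nz = norm != 0
        #   scores[nz] /= norm[nz][:, np.newaxis]
        #
        # which is why a sample whose decision values are all below -1 ends up
        # with the uniform [1/3, 1/3, 1/3] distribution asserted above.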
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
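        # i.e. the effective per-sample weight seen by the learner is assumed
        # to be sample_weight[i] * class_weight[y[i]], which is exactly what
        # ``multiplied_together`` computes above, e.g.:
        #
        #   effective = sample_weights * np.array(
        #       [class_weights[label] for label in Y4])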
def test_balanced_weight(self):
        # Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
        # build a very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
        # fit the same balanced model again to check the result is stable
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give small weights to the samples of class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
        assert_equal(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
        assert_equal(id1, id2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
        # ground truth linear model that generates y from X and to which the
        # models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
        assert_equal(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
|
bsd-3-clause
|
ssaeger/scikit-learn
|
sklearn/cross_validation.py
|
7
|
67336
|
"""
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
from .gaussian_process.kernels import Kernel as GPKernel
from .exceptions import FitFailedWarning
warnings.warn("This module has been deprecated in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. Also note that the interface of the "
"new CV iterators are different from that of this module. "
"This module will be removed in 0.20.", DeprecationWarning)
__all__ = ['KFold',
'LabelKFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'LabelShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
    Due to the high number of iterations which grows combinatorially with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling by default).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
    StratifiedKFold takes label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
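# Worked example of the fold sizes computed in KFold._iter_test_indices above:
# with n=10 and n_folds=3, n % n_folds == 1, so the fold sizes are [4, 3, 3]:
#
#   fold_sizes = (10 // 3) * np.ones(3, dtype=np.int)   # [3, 3, 3]
#   fold_sizes[:10 % 3] += 1                            # [4, 3, 3]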
class LabelKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping labels.
The same label will not appear in two different folds (the number of
distinct labels has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct labels is approximately the same in each fold.
.. versionadded:: 0.17
Parameters
----------
labels : array-like with shape (n_samples, )
Contains a label for each sample.
The folds are built so that the same label does not appear in two
different folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.cross_validation import LabelKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> labels = np.array([0, 0, 2, 2])
>>> label_kfold = LabelKFold(labels, n_folds=2)
>>> len(label_kfold)
2
>>> print(label_kfold)
sklearn.cross_validation.LabelKFold(n_labels=4, n_folds=2)
>>> for train_index, test_index in label_kfold:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def __init__(self, labels, n_folds=3):
super(LabelKFold, self).__init__(len(labels), n_folds,
shuffle=False, random_state=None)
unique_labels, labels = np.unique(labels, return_inverse=True)
n_labels = len(unique_labels)
if n_folds > n_labels:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of labels: {1}.").format(n_folds,
n_labels))
# Weight labels by their number of occurrences
n_samples_per_label = np.bincount(labels)
# Distribute the most frequent labels first
indices = np.argsort(n_samples_per_label)[::-1]
n_samples_per_label = n_samples_per_label[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(n_folds)
# Mapping from label index to fold index
label_to_fold = np.zeros(len(unique_labels))
# Distribute samples by adding the largest weight to the lightest fold
for label_index, weight in enumerate(n_samples_per_label):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
label_to_fold[indices[label_index]] = lightest_fold
self.idxs = label_to_fold[labels]
def _iter_test_indices(self):
for f in range(self.n_folds):
yield np.where(self.idxs == f)[0]
def __repr__(self):
return '{0}.{1}(n_labels={2}, n_folds={3})'.format(
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
)
def __len__(self):
return self.n_folds
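# Worked example of the greedy label-to-fold assignment in LabelKFold.__init__
# above: with per-label sample counts [5, 3, 2, 2] and n_folds=2, labels are
# visited by decreasing count and always added to the currently lightest fold:
#
#   5 samples -> fold 0   (fold loads become [5, 0])
#   3 samples -> fold 1   (fold loads become [5, 3])
#   2 samples -> fold 1   (fold loads become [5, 5])
#   2 samples -> fold 0   (fold loads become [7, 5])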
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size ``trunc(n_samples / n_folds)``; the last one has
    the complementary size.
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if np.all(self.n_folds > label_counts):
raise ValueError("All the n_labels for individual classes"
" are less than %d folds."
% (self.n_folds))
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
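# The __len__ above is the binomial coefficient C(n_unique_labels, p); for the
# docstring example with 3 unique labels and p=2 this gives
# 3! / (1! * 2!) == 3 iterations.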
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
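# Worked example for _validate_shuffle_split: with n=10, test_size=0.1 and
# train_size=None, n_test = ceil(0.1 * 10) = 1 and n_train = 10 - 1 = 9, so it
# returns (9, 1); with an integer test_size=3 it returns (7, 3).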
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
            # Because of rounding issues (as n_train and n_test are not
            # divisors of the number of elements per class), we may end
            # up here with fewer samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
                # We complete the folds by randomly assigning the missing indices
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
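# Worked example of the per-class allocation in _iter_indices above: with
# class counts [6, 4] (n=10), n_train=7 and n_test=3, p_i = [0.6, 0.4], so
#
#   n_i = round(7 * p_i)                        -> [4, 3] train samples per class
#   t_i = minimum([6, 4] - n_i, round(3 * p_i)) -> [2, 1] test samples per class
#
# and any shortfall caused by rounding is filled from the unused indices.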
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
class LabelShuffleSplit(ShuffleSplit):
"""Shuffle-Labels-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LabelShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique labels,
whereas LabelShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(labels, p=10)`` would be
``LabelShuffleSplit(labels, test_size=10, n_iter=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
not to samples, as in ShuffleSplit.
.. versionadded:: 0.17
Parameters
----------
labels : array, [n_samples]
Labels of samples
n_iter : int (default 5)
Number of re-shuffling and splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
"""
def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None,
random_state=None):
classes, label_indices = np.unique(labels, return_inverse=True)
super(LabelShuffleSplit, self).__init__(
len(classes),
n_iter=n_iter,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self.labels = labels
self.classes = classes
self.label_indices = label_indices
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.labels,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _iter_indices(self):
for label_train, label_test in super(LabelShuffleSplit,
self)._iter_indices():
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(self.label_indices, label_train))
test = np.flatnonzero(np.in1d(self.label_indices, label_test))
yield train, test
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
    either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
preds = [p for p, _ in preds_blocks]
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_locs = np.empty(len(locs), dtype=int)
inv_locs[locs] = np.arange(len(locs))
# Check for sparse predictions
if sp.issparse(preds[0]):
preds = sp.vstack(preds, format=preds[0].format)
else:
preds = np.concatenate(preds)
return preds[inv_locs]
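# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the original module):
# a minimal use of ``cross_val_predict`` to obtain out-of-fold predictions.
# The iris data and the LogisticRegression estimator are arbitrary choices
# made for this example only.
def _demo_cross_val_predict():  # pragma: no cover
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    iris = load_iris()
    # Every sample is predicted by a model that never saw it during fitting.
    y_pred = cross_val_predict(LogisticRegression(), iris.data, iris.target,
                               cv=5)
    return np.mean(y_pred == iris.target)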
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
        The data to fit. Can be, for example, a list or an array of at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
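# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the original module):
# a typical call to ``cross_val_score`` to estimate generalization accuracy.
# The dataset, estimator and fold count are arbitrary choices.
def _demo_cross_val_score():  # pragma: no cover
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    scores = cross_val_score(SVC(kernel='linear'), iris.data, iris.target,
                             cv=5, scoring='accuracy')
    # Mean and spread of the per-fold accuracies.
    return scores.mean(), scores.std()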
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel) \
and not isinstance(estimator.kernel, GPKernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
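# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the original module):
# how the precomputed-kernel branch of ``_safe_split`` slices a square Gram
# matrix.  The 4x4 matrix and index arrays below are made up for illustration;
# note that test rows are paired with *train* columns, which is exactly what
# ``np.ix_(indices, train_indices)`` does above.
def _demo_kernel_split():  # pragma: no cover
    K = np.arange(16).reshape(4, 4)                # pretend Gram matrix
    train, test = np.array([0, 1, 2]), np.array([3])
    K_train = K[np.ix_(train, train)]              # shape (3, 3)
    K_test = K[np.ix_(test, train)]                # shape (1, 3)
    return K_train.shape, K_test.shape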
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if classifier is True and ``y`` is binary or
        multiclass, :class:`StratifiedKFold` is used. In all other cases,
:class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
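# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the original module):
# ``check_cv`` builds a stratified splitter for classification targets and a
# plain K-fold splitter otherwise.  The toy targets below are arbitrary.
def _demo_check_cv():  # pragma: no cover
    y_class = np.array([0, 1, 0, 1, 0, 1])
    y_reg = np.array([0.1, 2.3, 1.7, 0.4, 3.1, 2.2])
    cv_clf = check_cv(3, y=y_class, classifier=True)    # StratifiedKFold
    cv_reg = check_cv(3, X=y_reg, classifier=False)     # KFold
    return type(cv_clf).__name__, type(cv_reg).__name__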
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
        the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutations.
pvalue : float
        The returned value equals the p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a problem with nosetests
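# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the original module):
# assessing how significant a cross-validated score is with
# ``permutation_test_score``.  The dataset, estimator and number of
# permutations are arbitrary choices for illustration.
def _demo_permutation_test():  # pragma: no cover
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    score, perm_scores, pvalue = permutation_test_score(
        SVC(kernel='linear'), iris.data, iris.target,
        cv=5, n_permutations=30, random_state=0)
    # A small p-value means the score is unlikely under shuffled labels.
    return score, pvalue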
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
.. versionadded:: 0.16
preserves input type instead of always casting to numpy array.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
.. versionadded:: 0.17
*stratify* splitting
Returns
-------
splitting : list, length = 2 * len(arrays),
List containing train-test split of inputs.
.. versionadded:: 0.16
Output type is the same as the input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False  # to avoid a problem with nosetests
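# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the original module):
# using the ``stratify`` option of ``train_test_split`` so that both splits
# keep the class balance of ``y``.  The toy data below are arbitrary.
def _demo_stratified_split():  # pragma: no cover
    X = np.arange(20).reshape(10, 2)
    y = np.array([0] * 8 + [1] * 2)                # imbalanced labels
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.5, stratify=y, random_state=0)
    # Both halves keep roughly the original 4:1 class ratio.
    return np.bincount(y_train), np.bincount(y_test)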
|
bsd-3-clause
|
hlin117/scikit-learn
|
examples/cluster/plot_face_segmentation.py
|
71
|
2839
|
"""
===================================================
Segmenting the picture of a raccoon face in regions
===================================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans', spectral clustering will cluster samples in the embedding
  space using a k-means algorithm
* whereas 'discretize' will iteratively search for the closest partition
  space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
# load the raccoon face as a numpy array
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
# Resize it to 10% of the original size to speed up the processing
face = sp.misc.imresize(face, 0.10) / 255.
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(face)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a Voronoi tessellation.
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 25
#############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels, random_state=1)
t1 = time.time()
labels = labels.reshape(face.shape)
plt.figure(figsize=(5, 5))
plt.imshow(face, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS))])
plt.xticks(())
plt.yticks(())
title = 'Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0))
print(title)
plt.title(title)
plt.show()
|
bsd-3-clause
|
mhue/scikit-learn
|
sklearn/ensemble/weight_boosting.py
|
30
|
40648
|
"""Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype)
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
        Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
        z : generator of float
            Score of the ensemble after each boosting iteration.
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _check_sample_weight(self):
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba <= 0] = 1e-5
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
y_predict_proba[y_predict_proba <= 0] = 1e-5
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
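        # Editorial worked example (not original code), assuming the default
        # learning_rate of 1: an error of 0.25 on a 3-class problem yields
        #   log((1 - 0.25) / 0.25) + log(3 - 1) = log(3) + log(2) ~ 1.79,
        # so estimators that beat chance get positive weights that grow as
        # their weighted error shrinks.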
        # Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : generator of array, shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same of that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
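# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the original module):
# fitting an AdaBoostClassifier with its default decision-stump weak learners
# and monitoring the staged training accuracy.  Dataset and hyper-parameters
# are arbitrary choices for illustration.
def _demo_adaboost_classifier():  # pragma: no cover
    from sklearn.datasets import load_iris
    iris = load_iris()
    clf = AdaBoostClassifier(n_estimators=50, learning_rate=1.0,
                             algorithm='SAMME.R', random_state=0)
    clf.fit(iris.data, iris.target)
    staged_acc = list(clf.staged_score(iris.data, iris.target))
    # Training accuracy after the final boosting round.
    return staged_acc[-1]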
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
    estimators_ : list of regressors
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
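    # Editorial worked example (not original code): for one sample with
    # estimator predictions [1.0, 5.0, 2.0] and weights [0.2, 0.5, 0.3],
    # sorting the predictions gives [1.0, 2.0, 5.0] with reordered weights
    # [0.2, 0.3, 0.5] and cumulative weights [0.2, 0.5, 1.0].  The first
    # entry reaching half of the total weight (0.5) is the second one, so
    # the weighted median returned above is 2.0.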
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
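# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not part of the original module):
# boosting shallow regression trees with AdaBoost.R2 on a noisy 1-D target.
# The synthetic data and hyper-parameters are arbitrary choices.
def _demo_adaboost_regressor():  # pragma: no cover
    rng = np.random.RandomState(0)
    X = np.sort(5 * rng.rand(120, 1), axis=0)
    y = np.sin(X).ravel() + 0.1 * rng.randn(120)
    reg = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
                            n_estimators=100, loss='linear', random_state=0)
    reg.fit(X, y)
    # Weighted-median predictions at a few query points.
    return reg.predict(np.array([[0.5], [2.5], [4.5]]))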
|
bsd-3-clause
|
etkirsch/scikit-learn
|
examples/tree/plot_iris.py
|
271
|
2186
|
"""
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
|
bsd-3-clause
|
RobertABT/heightmap
|
build/matplotlib/examples/pylab_examples/spine_placement_demo.py
|
3
|
2710
|
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure()
x = np.linspace(-np.pi,np.pi,100)
y = 2*np.sin(x)
ax = fig.add_subplot(2,2,1)
ax.set_title('centered spines')
ax.plot(x,y)
ax.spines['left'].set_position('center')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('center')
ax.spines['top'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax = fig.add_subplot(2,2,2)
ax.set_title('zeroed spines')
ax.plot(x,y)
ax.spines['left'].set_position('zero')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('zero')
ax.spines['top'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax = fig.add_subplot(2,2,3)
ax.set_title('spines at axes (0.6, 0.1)')
ax.plot(x,y)
ax.spines['left'].set_position(('axes',0.6))
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position(('axes',0.1))
ax.spines['top'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax = fig.add_subplot(2,2,4)
ax.set_title('spines at data (1,2)')
ax.plot(x,y)
ax.spines['left'].set_position(('data',1))
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position(('data',2))
ax.spines['top'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
# ----------------------------------------------------
def adjust_spines(ax,spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward',10)) # outward by 10 points
spine.set_smart_bounds(True)
else:
spine.set_color('none') # don't draw spine
# turn off ticks where there is no spine
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
# no yaxis ticks
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
# no xaxis ticks
ax.xaxis.set_ticks([])
fig = plt.figure()
x = np.linspace(0,2*np.pi,100)
y = 2*np.sin(x)
ax = fig.add_subplot(2,2,1)
ax.plot(x,y)
adjust_spines(ax,['left'])
ax = fig.add_subplot(2,2,2)
ax.plot(x,y)
adjust_spines(ax,[])
ax = fig.add_subplot(2,2,3)
ax.plot(x,y)
adjust_spines(ax,['left','bottom'])
ax = fig.add_subplot(2,2,4)
ax.plot(x,y)
adjust_spines(ax,['bottom'])
plt.show()
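# --- Added sketch: the same Spine API used above, but with explicit bounds via
# set_bounds instead of set_smart_bounds. Reuses the x, y arrays defined
# earlier in this script; values below are illustrative only.
fig2 = plt.figure()
ax2 = fig2.add_subplot(1, 1, 1)
ax2.plot(x, y)
for loc in ['right', 'top']:
    ax2.spines[loc].set_color('none')          # hide right and top spines
ax2.spines['left'].set_position(('outward', 10))
ax2.spines['bottom'].set_position(('outward', 10))
ax2.spines['bottom'].set_bounds(0, np.pi)      # draw the bottom spine over [0, pi] only
ax2.spines['left'].set_bounds(-2, 2)           # y = 2*sin(x) spans [-2, 2]
ax2.xaxis.set_ticks_position('bottom')
ax2.yaxis.set_ticks_position('left')
plt.show()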
|
mit
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/sklearn/mixture/dpgmm.py
|
5
|
35901
|
"""Bayesian Gaussian Mixture Models and
Dirichlet Process Gaussian Mixture Models"""
from __future__ import print_function
# Author: Alexandre Passos ([email protected])
# Bertrand Thirion <[email protected]>
#
# Based on mixture.py by:
# Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# Important note for the deprecation cleaning of 0.20 :
# All the functions and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_dpgmm.py'
# - 'sklearn/mixture/test_gmm.py'
import numpy as np
from scipy.special import digamma as _digamma, gammaln as _gammaln
from scipy import linalg
from scipy.linalg import pinvh
from scipy.spatial.distance import cdist
from ..externals.six.moves import xrange
from ..utils import check_random_state, check_array, deprecated
from ..utils.fixes import logsumexp
from ..utils.extmath import squared_norm, stable_cumsum
from ..utils.validation import check_is_fitted
from .. import cluster
from .gmm import _GMMBase
@deprecated("The function digamma is deprecated in 0.18 and "
"will be removed in 0.20. Use scipy.special.digamma instead.")
def digamma(x):
return _digamma(x + np.finfo(np.float32).eps)
@deprecated("The function gammaln is deprecated in 0.18 and "
"will be removed in 0.20. Use scipy.special.gammaln instead.")
def gammaln(x):
return _gammaln(x + np.finfo(np.float32).eps)
@deprecated("The function log_normalize is deprecated in 0.18 and "
"will be removed in 0.20.")
def log_normalize(v, axis=0):
"""Normalized probabilities from unnormalized log-probabilities"""
v = np.rollaxis(v, axis)
v = v.copy()
v -= v.max(axis=0)
out = logsumexp(v)
v = np.exp(v - out)
v += np.finfo(np.float32).eps
v /= np.sum(v, axis=0)
return np.swapaxes(v, 0, axis)
@deprecated("The function wishart_log_det is deprecated in 0.18 and "
"will be removed in 0.20.")
def wishart_log_det(a, b, detB, n_features):
"""Expected value of the log of the determinant of a Wishart
The expected value of the logarithm of the determinant of a
wishart-distributed random variable with the specified parameters."""
l = np.sum(digamma(0.5 * (a - np.arange(-1, n_features - 1))))
l += n_features * np.log(2)
return l + detB
@deprecated("The function wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.")
def wishart_logz(v, s, dets, n_features):
"The logarithm of the normalization constant for the wishart distribution"
z = 0.
z += 0.5 * v * n_features * np.log(2)
z += (0.25 * (n_features * (n_features - 1)) * np.log(np.pi))
z += 0.5 * v * np.log(dets)
z += np.sum(gammaln(0.5 * (v - np.arange(n_features) + 1)))
return z
def _bound_wishart(a, B, detB):
"""Returns a function of the dof, scale matrix and its determinant
used as an upper bound in variational approximation of the evidence"""
n_features = B.shape[0]
logprior = wishart_logz(a, B, detB, n_features)
logprior -= wishart_logz(n_features,
np.identity(n_features),
1, n_features)
logprior += 0.5 * (a - 1) * wishart_log_det(a, B, detB, n_features)
logprior += 0.5 * a * np.trace(B)
return logprior
##############################################################################
# Variational bound on the log likelihood of each class
##############################################################################
def _sym_quad_form(x, mu, A):
"""helper function to calculate symmetric quadratic form x.T * A * x"""
q = (cdist(x, mu[np.newaxis], "mahalanobis", VI=A) ** 2).reshape(-1)
return q
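# --- Added sketch (hypothetical helper, not part of scikit-learn): a quick
# check that _sym_quad_form matches the explicit row-wise quadratic form
# (x - mu)^T A (x - mu).
def _demo_sym_quad_form():
    rng = np.random.RandomState(0)
    x = rng.randn(5, 3)
    mu = rng.randn(3)
    A = 2.0 * np.eye(3)                       # a simple SPD "precision" matrix
    q = _sym_quad_form(x, mu, A)
    q_explicit = np.einsum('ij,jk,ik->i', x - mu, A, x - mu)
    assert np.allclose(q, q_explicit)
    return q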
def _bound_state_log_lik(X, initial_bound, precs, means, covariance_type):
"""Update the bound with likelihood terms, for standard covariance types"""
n_components, n_features = means.shape
n_samples = X.shape[0]
bound = np.empty((n_samples, n_components))
bound[:] = initial_bound
if covariance_type in ['diag', 'spherical']:
for k in range(n_components):
d = X - means[k]
bound[:, k] -= 0.5 * np.sum(d * d * precs[k], axis=1)
elif covariance_type == 'tied':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs)
elif covariance_type == 'full':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs[k])
return bound
class _DPGMMBase(_GMMBase):
"""Variational Inference for the Infinite Gaussian Mixture Model.
DPGMM stands for Dirichlet Process Gaussian Mixture Model, and it
is an infinite mixture model with the Dirichlet Process as a prior
distribution on the number of clusters. In practice the
approximate inference algorithm uses a truncated distribution with
a fixed maximum number of components, but almost always the number
of components actually used depends on the data.
Stick-breaking Representation of a Gaussian mixture model
probability distribution. This class allows for easy and efficient
inference of an approximate posterior distribution over the
parameters of a Gaussian mixture model with a variable number of
components (smaller than the truncation parameter n_components).
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <dpgmm>`.
Parameters
----------
n_components : int, default 1
Number of mixture components.
covariance_type : string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha : float, default 1
Real number representing the concentration parameter of
the dirichlet process. Intuitively, the Dirichlet Process
is as likely to start a new cluster for a point as it is
to add that point to a cluster with alpha elements. A
higher alpha means more clusters, as the expected number
of clusters is ``alpha*log(N)``.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_components : int
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
VBGMM : Finite Gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0, min_covar=None,
n_iter=10, params='wmc', init_params='wmc'):
self.alpha = alpha
super(_DPGMMBase, self).__init__(n_components, covariance_type,
random_state=random_state,
tol=tol, min_covar=min_covar,
n_iter=n_iter, params=params,
init_params=init_params,
verbose=verbose)
def _get_precisions(self):
"""Return precisions as a full matrix."""
if self.covariance_type == 'full':
return self.precs_
elif self.covariance_type in ['diag', 'spherical']:
return [np.diag(cov) for cov in self.precs_]
elif self.covariance_type == 'tied':
return [self.precs_] * self.n_components
def _get_covars(self):
return [pinvh(c) for c in self._get_precisions()]
def _set_covars(self, covars):
raise NotImplementedError("""The variational algorithm does
not support setting the covariance parameters.""")
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
z = np.zeros((X.shape[0], self.n_components))
sd = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dgamma1 = digamma(self.gamma_.T[1]) - sd
dgamma2 = np.zeros(self.n_components)
dgamma2[0] = digamma(self.gamma_[0, 2]) - digamma(self.gamma_[0, 1] +
self.gamma_[0, 2])
for j in range(1, self.n_components):
dgamma2[j] = dgamma2[j - 1] + digamma(self.gamma_[j - 1, 2])
dgamma2[j] -= sd[j - 1]
dgamma = dgamma1 + dgamma2
# Free memory and developers' cognitive load:
del dgamma1, dgamma2, sd
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dgamma
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
"""Update the concentration parameters for each cluster"""
sz = np.sum(z, axis=0)
self.gamma_.T[1] = 1. + sz
self.gamma_.T[2].fill(0)
for i in range(self.n_components - 2, -1, -1):
self.gamma_[i, 2] = self.gamma_[i + 1, 2] + sz[i]
self.gamma_.T[2] += self.alpha
def _update_means(self, X, z):
"""Update the variational distributions for the means"""
n_features = X.shape[1]
for k in range(self.n_components):
if self.covariance_type in ['spherical', 'diag']:
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num *= self.precs_[k]
den = 1. + self.precs_[k] * np.sum(z.T[k])
self.means_[k] = num / den
elif self.covariance_type in ['tied', 'full']:
if self.covariance_type == 'tied':
cov = self.precs_
else:
cov = self.precs_[k]
den = np.identity(n_features) + cov * np.sum(z.T[k])
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num = np.dot(cov, num)
self.means_[k] = linalg.lstsq(den, num)[0]
def _update_precisions(self, X, z):
"""Update the variational distributions for the precisions"""
n_features = X.shape[1]
if self.covariance_type == 'spherical':
self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
for k in range(self.n_components):
# could be more memory efficient ?
sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
self.scale_[k] = 1.
self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
self.bound_prec_[k] = (
0.5 * n_features * (
digamma(self.dof_[k]) - np.log(self.scale_[k])))
self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T
elif self.covariance_type == 'diag':
for k in range(self.n_components):
self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
sq_diff = (X - self.means_[k]) ** 2 # see comment above
self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
z.T[k], (sq_diff + 1))
self.precs_[k] = self.dof_[k] / self.scale_[k]
self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
- np.log(self.scale_[k]))
self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])
elif self.covariance_type == 'tied':
self.dof_ = 2 + X.shape[0] + n_features
self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
for k in range(self.n_components):
diff = X - self.means_[k]
self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_ = pinvh(self.scale_)
self.precs_ = self.dof_ * self.scale_
self.det_scale_ = linalg.det(self.scale_)
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
sum_resp = np.sum(z.T[k])
self.dof_[k] = 2 + sum_resp + n_features
self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
diff = X - self.means_[k]
self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_[k] = pinvh(self.scale_[k])
self.precs_[k] = self.dof_[k] * self.scale_[k]
self.det_scale_[k] = linalg.det(self.scale_[k])
self.bound_prec_[k] = 0.5 * wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
self.scale_[k])
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose > 0:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_.T[1])
print("covariance_type:", self.covariance_type)
def _do_mstep(self, X, z, params):
"""Maximize the variational lower bound
Update each of the parameters to maximize the lower bound."""
self._monitor(X, z, "z")
self._update_concentration(z)
self._monitor(X, z, "gamma")
if 'm' in params:
self._update_means(X, z)
self._monitor(X, z, "mu")
if 'c' in params:
self._update_precisions(X, z)
self._monitor(X, z, "a and b", end=True)
def _initialize_gamma(self):
"Initializes the concentration parameters"
self.gamma_ = self.alpha * np.ones((self.n_components, 3))
def _bound_concentration(self):
"""The variational lower bound for the concentration parameter."""
logprior = gammaln(self.alpha) * self.n_components
logprior += np.sum((self.alpha - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior += np.sum(- gammaln(self.gamma_.T[1] + self.gamma_.T[2]))
logprior += np.sum(gammaln(self.gamma_.T[1]) +
gammaln(self.gamma_.T[2]))
logprior -= np.sum((self.gamma_.T[1] - 1) * (
digamma(self.gamma_.T[1]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior -= np.sum((self.gamma_.T[2] - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
return logprior
def _bound_means(self):
"The variational lower bound for the mean parameters"
logprior = 0.
logprior -= 0.5 * squared_norm(self.means_)
logprior -= 0.5 * self.means_.shape[1] * self.n_components
return logprior
def _bound_precisions(self):
"""Returns the bound term related to precisions"""
logprior = 0.
if self.covariance_type == 'spherical':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_
- self.precs_[:, 0])
elif self.covariance_type == 'diag':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_ - self.precs_)
elif self.covariance_type == 'tied':
logprior += _bound_wishart(self.dof_, self.scale_, self.det_scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
logprior += _bound_wishart(self.dof_[k],
self.scale_[k],
self.det_scale_[k])
return logprior
def _bound_proportions(self, z):
"""Returns the bound term related to proportions"""
dg12 = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dg1 = digamma(self.gamma_.T[1]) - dg12
dg2 = digamma(self.gamma_.T[2]) - dg12
cz = stable_cumsum(z[:, ::-1], axis=-1)[:, -2::-1]
logprior = np.sum(cz * dg2[:-1]) + np.sum(z * dg1)
del cz # Save memory
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _logprior(self, z):
logprior = self._bound_concentration()
logprior += self._bound_means()
logprior += self._bound_precisions()
logprior += self._bound_proportions(z)
return logprior
def lower_bound(self, X, z):
"""returns a lower bound on model evidence based on X and membership"""
check_is_fitted(self, 'means_')
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
c = np.sum(z * _bound_state_log_lik(X, self._initial_bound +
self.bound_prec_, self.precs_,
self.means_, self.covariance_type))
return c + self._logprior(z)
def _set_weights(self):
for i in xrange(self.n_components):
self.weights_[i] = self.gamma_[i, 1] / (self.gamma_[i, 1]
+ self.gamma_[i, 2])
self.weights_ /= np.sum(self.weights_)
def _fit(self, X, y=None):
"""Estimate model parameters with the variational
algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating
the object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
self.random_state_ = check_random_state(self.random_state)
# initialization step
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
z = np.ones((n_samples, self.n_components))
z /= self.n_components
self._initial_bound = - 0.5 * n_features * np.log(2 * np.pi)
self._initial_bound -= np.log(2 * np.pi * np.e)
if (self.init_params != '') or not hasattr(self, 'gamma_'):
self._initialize_gamma()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state_).fit(X).cluster_centers_[::-1]
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components, self.n_components)
if 'c' in self.init_params or not hasattr(self, 'precs_'):
if self.covariance_type == 'spherical':
self.dof_ = np.ones(self.n_components)
self.scale_ = np.ones(self.n_components)
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * n_features * (
digamma(self.dof_) - np.log(self.scale_))
elif self.covariance_type == 'diag':
self.dof_ = 1 + 0.5 * n_features
self.dof_ *= np.ones((self.n_components, n_features))
self.scale_ = np.ones((self.n_components, n_features))
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * (np.sum(digamma(self.dof_) -
np.log(self.scale_), 1))
self.bound_prec_ -= 0.5 * np.sum(self.precs_, 1)
elif self.covariance_type == 'tied':
self.dof_ = 1.
self.scale_ = np.identity(n_features)
self.precs_ = np.identity(n_features)
self.det_scale_ = 1.
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
self.dof_ = (1 + self.n_components + n_samples)
self.dof_ *= np.ones(self.n_components)
self.scale_ = [2 * np.identity(n_features)
for _ in range(self.n_components)]
self.precs_ = [np.identity(n_features)
for _ in range(self.n_components)]
self.det_scale_ = np.ones(self.n_components)
self.bound_prec_ = np.zeros(self.n_components)
for k in range(self.n_components):
self.bound_prec_[k] = wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= (self.dof_[k] *
np.trace(self.scale_[k]))
self.bound_prec_ *= 0.5
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
prev_log_likelihood = current_log_likelihood
# Expectation step
curr_logprob, z = self.score_samples(X)
current_log_likelihood = (
curr_logprob.mean() + self._logprior(z) / n_samples)
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if change < self.tol:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, z, self.params)
if self.n_iter == 0:
# Need to make sure that there is a z value to output
# Output zeros because it was just a quick initialization
z = np.zeros((X.shape[0], self.n_components))
self._set_weights()
return z
@deprecated("The `DPGMM` class is not working correctly and it's better "
"to use `sklearn.mixture.BayesianGaussianMixture` class with "
"parameter `weight_concentration_prior_type='dirichlet_process'` "
"instead. DPGMM is deprecated in 0.18 and will be "
"removed in 0.20.")
class DPGMM(_DPGMMBase):
"""Dirichlet Process Gaussian Mixture Models
.. deprecated:: 0.18
This class will be removed in 0.20.
Use :class:`sklearn.mixture.BayesianGaussianMixture` with
parameter ``weight_concentration_prior_type='dirichlet_process'``
instead.
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0, min_covar=None,
n_iter=10, params='wmc', init_params='wmc'):
super(DPGMM, self).__init__(
n_components=n_components, covariance_type=covariance_type,
alpha=alpha, random_state=random_state, tol=tol, verbose=verbose,
min_covar=min_covar, n_iter=n_iter, params=params,
init_params=init_params)
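# --- Added sketch (hypothetical helper): the replacement recommended by the
# deprecation message above. Assumes sklearn.mixture.BayesianGaussianMixture
# is available (scikit-learn >= 0.18); data and settings are illustrative.
def _demo_dirichlet_process_replacement():
    from sklearn.mixture import BayesianGaussianMixture
    rng = np.random.RandomState(0)
    X = np.concatenate([rng.randn(50, 2), rng.randn(50, 2) + 5.0])
    bgmm = BayesianGaussianMixture(
        n_components=10,
        weight_concentration_prior_type='dirichlet_process',
        weight_concentration_prior=1.0,
        random_state=0).fit(X)
    # with a Dirichlet-process prior, most of the 10 allowed components end up
    # with negligible weight; this two-blob data only needs two of them
    return bgmm.weights_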
@deprecated("The `VBGMM` class is not working correctly and it's better "
"to use `sklearn.mixture.BayesianGaussianMixture` class with "
"parameter `weight_concentration_prior_type="
"'dirichlet_distribution'` instead. "
"VBGMM is deprecated in 0.18 and will be removed in 0.20.")
class VBGMM(_DPGMMBase):
"""Variational Inference for the Gaussian Mixture Model
.. deprecated:: 0.18
This class will be removed in 0.20.
Use :class:`sklearn.mixture.BayesianGaussianMixture` with parameter
``weight_concentration_prior_type='dirichlet_distribution'`` instead.
Variational inference for a Gaussian mixture model probability
distribution. This class allows for easy and efficient inference
of an approximate posterior distribution over the parameters of a
Gaussian mixture model with a fixed number of components.
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <bgmm>`.
Parameters
----------
n_components : int, default 1
Number of mixture components.
covariance_type : string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha : float, default 1
Real number representing the concentration parameter of
the dirichlet distribution. Intuitively, the higher the
value of alpha the more likely the variational mixture of
Gaussians model will use all components it can.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussians.
n_components : int (read-only)
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False
otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
DPGMM : Infinite Gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0,
min_covar=None, n_iter=10, params='wmc', init_params='wmc'):
super(VBGMM, self).__init__(
n_components, covariance_type, random_state=random_state,
tol=tol, verbose=verbose, min_covar=min_covar,
n_iter=n_iter, params=params, init_params=init_params)
self.alpha = alpha
def _fit(self, X, y=None):
"""Estimate model parameters with the variational algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating
the object. Likewise, if you just would like to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
self.alpha_ = float(self.alpha) / self.n_components
return super(VBGMM, self)._fit(X, y)
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
dg = digamma(self.gamma_) - digamma(np.sum(self.gamma_))
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dg
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
for i in range(self.n_components):
self.gamma_[i] = self.alpha_ + np.sum(z.T[i])
def _initialize_gamma(self):
self.gamma_ = self.alpha_ * np.ones(self.n_components)
def _bound_proportions(self, z):
logprior = 0.
dg = digamma(self.gamma_)
dg -= digamma(np.sum(self.gamma_))
logprior += np.sum(dg.reshape((-1, 1)) * z.T)
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _bound_concentration(self):
logprior = 0.
logprior = gammaln(np.sum(self.gamma_)) - gammaln(self.n_components
* self.alpha_)
logprior -= np.sum(gammaln(self.gamma_) - gammaln(self.alpha_))
sg = digamma(np.sum(self.gamma_))
logprior += np.sum((self.gamma_ - self.alpha_)
* (digamma(self.gamma_) - sg))
return logprior
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose > 0:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_)
print("covariance_type:", self.covariance_type)
def _set_weights(self):
self.weights_[:] = self.gamma_
self.weights_ /= np.sum(self.weights_)
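# --- Added sketch (hypothetical helper): the replacement recommended by the
# VBGMM deprecation message above; only the prior type differs from the
# Dirichlet-process sketch earlier in this file.
def _demo_dirichlet_distribution_replacement():
    from sklearn.mixture import BayesianGaussianMixture
    rng = np.random.RandomState(0)
    X = np.concatenate([rng.randn(50, 2), rng.randn(50, 2) + 5.0])
    return BayesianGaussianMixture(
        n_components=5,
        weight_concentration_prior_type='dirichlet_distribution',
        random_state=0).fit(X).weights_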
|
mit
|
YuzhongHuang/SoftwareSystems
|
hw01/ch01.py
|
24
|
2897
|
"""Modified version of the example code from Janert,
Feedback Control For Computer Systems
This modified version requires pandas, numpy, and matplotlib.
If you use apt:
sudo apt-get install python-pandas python-numpy python-matplotlib
"""
import numpy
import pandas
import random
import matplotlib.pyplot as pyplot
class Buffer:
def __init__( self, max_wip, max_flow ):
"""Initializes the buffer:
max_wip: maximum work in progress
max_flow: maximum work completed per time step
"""
self.queued = 0
self.wip = 0 # work-in-progress ("ready pool")
self.max_wip = max_wip
self.max_flow = max_flow # avg outflow is max_flow/2
def work( self, u ):
# Add to ready pool
u = max( 0, int(round(u)) )
u = min( u, self.max_wip )
self.wip += u
# Transfer from ready pool to queue
r = int( round( random.uniform( 0, self.wip ) ) )
self.wip -= r
self.queued += r
# Release from queue to downstream process
r = int( round( random.uniform( 0, self.max_flow ) ) )
r = min( r, self.queued )
self.queued -= r
return self.queued
class Controller:
def __init__( self, kp, ki ):
"""Initializes the controller.
kp: proportional gain
ki: integral gain
"""
self.kp, self.ki = kp, ki
self.i = 0 # Cumulative error ("integral")
def work( self, e ):
"""Computes the number of jobs to be added to the ready queue.
e: error
returns: float number of jobs
"""
self.i += e
return self.kp*e + self.ki*self.i
# ============================================================
def closed_loop( c, p, tm=5000 ):
"""Simulates a closed loop control system.
c: Controller object
p: Buffer object
tm: number of time steps
returns: tuple of sequences (times, targets, errors)
"""
def setpoint( t ):
if t < 100: return 0
if t < 300: return 50
return 10
y = 0
res = []
for t in range( tm ):
r = setpoint(t)
e = r - y
u = c.work(e)
y = p.work(u)
#print t, r, e, u, y
res.append((t, r, e, u, y))
return zip(*res)
# ============================================================
c = Controller( 1.25, 0.01 )
p = Buffer( 50, 10 )
# run the simulation
ts, rs, es, us, ys = closed_loop( c, p, 1000 )
print('RMS error', numpy.sqrt(numpy.mean(numpy.array(es)**2)))
# generate the smoothed curve using a rolling mean
# (I think the curves in the book use loess)
ys_smooth = pandas.Series(ys).rolling(20).mean()  # pandas.rolling_mean was removed in newer pandas
# make the plot
pyplot.plot(ts, rs, color='green', label='target')
pyplot.plot(ts, ys, color='red', label='queue length')
pyplot.plot(ts, ys_smooth, color='blue', label='trend')
pyplot.show()
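# --- Added sketch: reusing the Controller, Buffer and closed_loop definitions
# above to compare two PI gain settings. The gain values are illustrative,
# not tuned recommendations.
for kp, ki in [(0.5, 0.001), (1.25, 0.01)]:
    _, _, es2, _, _ = closed_loop(Controller(kp, ki), Buffer(50, 10), 1000)
    rms = numpy.sqrt(numpy.mean(numpy.array(es2)**2))
    print('kp=%.2f ki=%.3f RMS error %.2f' % (kp, ki, rms))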
|
gpl-3.0
|
nzavagli/UnrealPy
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/numpy-1.9.2/numpy/lib/function_base.py
|
30
|
124613
|
from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from ._compiled_base import _insert, add_docstring
from ._compiled_base import digitize, bincount, interp as compiled_interp
from ._compiled_base import add_newdoc_ufunc
from numpy.compat import long
# Force range to be a generator, for np.delete's usage.
if sys.version_info[0] < 3:
range = xrange
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
]
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try:
iter(y)
except:
return 0
return 1
def histogram(a, bins=10, range=None, normed=False, weights=None,
density=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the density keyword
instead.
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that this latter behavior is
known to be buggy with unequal bin widths; use `density` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError(
'`bins` should be a positive integer.')
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
range = (0, 1)
else:
range = (a.min(), a.max())
mn, mx = [mi + 0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins + 1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = int
else:
ntype = weights.dtype
n = np.zeros(bins.shape, ntype)
block = 65536
if weights is None:
for i in arange(0, len(a), block):
sa = sort(a[i:i+block])
n += np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), block):
tmp_a = a[i:i+block]
tmp_w = weights[i:i+block]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero, ], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
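# --- Added sketch (hypothetical helper, not part of numpy): `density=True`
# with unequal bin widths, the case the docstring above flags as buggy under
# the old `normed` keyword.
def _demo_histogram_density():
    data = np.array([0.1, 0.4, 0.5, 2.0, 3.5, 3.9])
    hist, edges = np.histogram(data, bins=[0, 1, 4], density=True)
    # the integral of the density over the bins is exactly 1
    assert np.isclose(np.sum(hist * np.diff(edges)), 1.0)
    return hist, edges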
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_volume``.
weights : array_like (N,), optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
'The dimension of bins must be equal to the dimension of the '
'sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# avoid rounding issues for comparisons when dealing with inexact types
if np.issubdtype(sample.dtype, np.inexact):
edge_dt = sample.dtype
else:
edge_dt = float
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError(
"Element at index %s in `bins` should be a positive "
"integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
else:
edges[i] = asarray(bins[i], edge_dt)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError(
"Found bin edge of size <= 0. Did you specify `bins` with"
"non-monotonic sequence?")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
on_edge = (around(sample[:, i], decimal) ==
around(edges[i][-1], decimal))
# Shift these points one bin to the left.
Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1, -1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : {array_type, double}
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix):
a = np.asarray(a)
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else:
a = a + 0.0
wgt = np.array(weights, dtype=a.dtype, copy=0)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis)
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
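# --- Added sketch (hypothetical helper, not part of numpy): the
# `returned=True` path described above, which also yields the sum of the
# weights used for normalization.
def _demo_average_returned():
    data = np.arange(6).reshape((3, 2))
    avg, sum_w = np.average(data, axis=1, weights=[1., 3.], returned=True)
    # sum_w is broadcast to the shape of `avg`; every entry is 1 + 3 = 4
    assert np.allclose(sum_w, 4.0)
    return avg, sum_w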
def asarray_chkfinite(a, dtype=None, order=None):
"""
Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print('ValueError')
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., alpha=1)``, then each function is called as
``f(x, alpha=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have a default value of 0.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.linspace(-2.5, 2.5, 6)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x < 0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if (isscalar(condlist) or not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray))):
condlist = [condlist]
condlist = array(condlist, dtype=bool)
n = len(condlist)
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
zerod = False
if x.ndim == 0:
x = x[None]
zerod = True
if condlist.shape[-1] != 1:
condlist = condlist.T
if n == n2 - 1: # compute the "otherwise" condition.
totlist = np.logical_or.reduce(condlist, axis=0)
condlist = np.vstack([condlist, ~totlist])
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not isinstance(item, collections.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
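# --- Added sketch (hypothetical helper, not part of numpy): the extra
# "otherwise" function described in the docstring above
# (len(funclist) == len(condlist) + 1).
def _demo_piecewise_default():
    x = np.array([-2., 0., 3.])
    # -1 where x < 0, +1 where x > 0, and the default 0 where neither holds
    return np.piecewise(x, [x < 0, x > 0], [-1, 1, 0])   # -> [-1., 0., 1.]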
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
warnings.warn("select with an empty condition list is not possible"
"and will be deprecated",
DeprecationWarning)
return np.asarray(default)[()]
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
# need to get the result type before broadcasting for correct scalar
# behaviour
dtype = np.result_type(*choicelist)
# Convert conditions to arrays and broadcast conditions and choices
# as the shape is needed for the result. Doing it separately optimizes
# for example when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
if np.issubdtype(cond.dtype, np.integer):
# A previous implementation accepted int ndarrays accidentally.
# Supported here deliberately, but deprecated.
condlist[i] = condlist[i].astype(bool)
deprecated_ints = True
else:
raise ValueError(
'invalid entry in choicelist: should be boolean ndarray')
if deprecated_ints:
msg = "select condlists containing integer ndarrays is deprecated " \
"and will be removed in the future. Use `.astype(bool)` to " \
"convert to bools."
warnings.warn(msg, DeprecationWarning)
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
result_shape = condlist[0].shape
else:
result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
result = np.full(result_shape, choicelist[-1], dtype)
# Use np.copyto to burn each choicelist array onto result, using the
# corresponding condlist as a boolean mask. This is done in reverse
# order since the first choice should take precedence.
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
np.copyto(result, choice, where=cond)
return result
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :meth:`ndarray.copy` are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
in the interior and either first differences or second order accurate
one-sides (forward or backwards) differences at the boundaries. The
returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
varargs : list of scalar, optional
N scalars specifying the sample distances for each dimension,
i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
edge_order : {1, 2}, optional
Gradient is calculated using N\ :sup:`th` order accurate differences
at the boundaries. Default: 1.
.. versionadded:: 1.9.1
Returns
-------
gradient : ndarray
N arrays of the same shape as `f` giving the derivative of `f` with
respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
>>> x = np.array([0, 1, 2, 3, 4])
>>> dx = np.gradient(x)
>>> y = x**2
>>> np.gradient(y, dx, edge_order=2)
array([-0., 2., 4., 6., 8.])
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == N:
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
edge_order = kwargs.pop('edge_order', 1)
if kwargs:
raise TypeError('"{}" are not valid keyword arguments.'.format(
'", "'.join(kwargs.keys())))
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
slice4 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M':
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm':
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
    # just set y equal to the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view('int64')
else:
y = f
for axis in range(N):
if y.shape[axis] < 2:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least two elements are required.")
# Numerical differentiation: 1st order edges, 2nd order interior
if y.shape[axis] == 2 or edge_order == 1:
# Use first order differences for time data
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (y[1] - y[0])
out[slice1] = (y[slice2] - y[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (y[-1] - y[-2])
out[slice1] = (y[slice2] - y[slice3])
        # Numerical differentiation: 2nd order edges, 2nd order interior
else:
# Use second order differences where possible
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
# 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
slice4[axis] = -3
            # 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3]) / 2.0
out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
# divide by step size
out /= dx[axis]
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if N == 1:
return outvals[0]
else:
return outvals
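# Editor's note (illustrative sketch, not part of the original module): the two
# edge treatments above differ in accuracy.  For y = x**2 with unit spacing the
# exact derivative at the ends is 0 and 8; the first-order edges miss this,
# while the second-order one-sided formulas recover it:
#
#   >>> y = np.arange(5.0)**2
#   >>> np.gradient(y)                 # edge_order=1: out[0] = y[1] - y[0]
#   array([ 1.,  2.,  4.,  6.,  7.])
#   >>> np.gradient(y, edge_order=2)   # out[0] = -(3*y[0] - 4*y[1] + y[2])/2.0
#   array([-0.,  2.,  4.,  6.,  8.])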
def diff(a, n=1, axis=-1):
"""
Calculate the n-th order discrete difference along given axis.
The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher order differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
        The `n`-th order differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d, cumsum
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
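# Editor's note (illustrative sketch, not part of the original module): for
# n > 1 the function simply recurses on the first difference, so
# np.diff(x, n=2) gives the same result as applying np.diff twice:
#
#   >>> x = np.array([1, 2, 4, 7, 0])
#   >>> np.diff(np.diff(x))
#   array([  1,   1, -10])
#   >>> np.diff(x, n=2)
#   array([  1,   1, -10])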
def interp(x, xp, fp, left=None, right=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, default is `fp[-1]`.
Returns
-------
y : {float, ndarray}
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : {ndarray, scalar}
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
ddmod = mod(dd + pi, 2*pi) - pi
_nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd) < discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
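# Editor's note (illustrative sketch, not part of the original module): the
# correction applied above is ``ddmod - dd`` wherever ``abs(dd) >= discont``,
# i.e. each oversized jump is replaced by its 2*pi complement.  A forward jump
# of 1.9*pi therefore becomes 1.9*pi - 2*pi = -0.1*pi:
#
#   >>> np.unwrap(np.array([0.0, 1.9 * np.pi]))
#   array([ 0.        , -0.31415927])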
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a, copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.:
break
else:
first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.:
break
else:
last = last - 1
return filt[first:last]
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True], tmp[1:] != tmp[:-1]))
return tmp[idx]
except AttributeError:
items = sorted(set(x))
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
        Boolean mask array. Must have the same size as `arr`.
vals : 1-D sequence
        Values to put into `arr`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
return _insert(arr, mask, vals)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the
first argument will be used to determine the number of outputs. The
results of this call will be cached if `cache` is `True` to prevent
calling the function twice. However, to implement the cache, the
original function must be wrapped which will slow down subsequent
calls, so only do this if your function is expensive.
    The new keyword argument interface and `excluded` argument support
    further degrade performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
cache=False):
self.pyfunc = pyfunc
self.cache = cache
self._ufunc = None # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError(
"Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError(
"Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError('args can not be empty')
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
"""
Estimate a covariance matrix, given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
form as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print np.cov(X)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x, y)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x)
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = np.asarray(y)
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
N = X.shape[1]
axis = 0
else:
N = X.shape[0]
axis = 1
# check ddof
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
fact = float(N - ddof)
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=dtype)
X = concatenate((X, y), axis)
X -= X.mean(axis=1-axis, keepdims=True)
if not rowvar:
return (dot(X.T, X.conj()) / fact).squeeze()
else:
return (dot(X, X.T.conj()) / fact).squeeze()
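# Editor's note (illustrative sketch, not part of the original module): the
# ``bias``/``ddof`` handling above means the default normalization divides by
# N - 1, while ``bias=1`` (equivalently ``ddof=0``) divides by N:
#
#   >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T   # 2 variables, N = 3 observations
#   >>> np.cov(x)                                  # divides by N - 1 = 2
#   array([[ 1., -1.],
#          [-1.,  1.]])
#   >>> np.cov(x, bias=1)                          # divides by N = 3
#   array([[ 0.66666667, -0.66666667],
#          [-0.66666667,  0.66666667]])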
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None):
"""
Return correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `P`, and the
covariance matrix, `C`, is
.. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `P` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
        Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
        shape as `x`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : {None, int}, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
"""
c = cov(x, y, rowvar, bias, ddof)
try:
d = diag(c)
except ValueError: # scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
return c / sqrt(multiply.outer(d, d))
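# Editor's note (illustrative sketch, not part of the original module): the
# return statement above implements P_ij = C_ij / sqrt(C_ii * C_jj)
# element-wise, so the result can be checked directly against `cov`:
#
#   >>> x = [-2.1, -1, 4.3]
#   >>> y = [3, 1.1, 0.12]
#   >>> C = np.cov(x, y)
#   >>> np.allclose(np.corrcoef(x, y)[0, 1], C[0, 1] / np.sqrt(C[0, 0] * C[1, 1]))
#   True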
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
    tapering function. The Fourier transform of the Bartlett window is the
    product of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
    The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
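# Editor's note (added commentary, not in the original source): _chbevl below
# evaluates a Chebyshev series by Clenshaw's recurrence,
# b_k = x*b_{k+1} - b_{k+2} + c_k, returning 0.5*(b_0 - b_2).  _i0A and _i0B
# are the Cephes coefficient tables used by i0() on the intervals [0, 8] and
# (8, inf), respectively (see the argument mappings in _i0_1 and _i0_2).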
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
polynomial expansions are employed in each interval. Relative error on
the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x < 0)
x[ind] = -x[ind]
ind = (x <= 8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
def kaiser(M, beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions. The Kaiser
window is a very good approximation to the Digital Prolate Spheroidal
Sequence, or Slepian window, which is the transform which maximizes the
energy in the main lobe of the window relative to total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0, M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
        Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a Lanczos resampling
filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.linspace(-4, 4, 401)
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
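    # Editor's note (added commentary, not in the original source): x == 0 is
    # replaced by a tiny nonzero value so that sin(y)/y evaluates to ~1.0 there
    # without dividing by zero; this is how sinc(0) returns the limit value 1.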
y = pi * where(x == 0, 1.0e-20, x)
return sin(y)/y
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a, subok=True, copy=True)
b.sort(0)
return b
def _ureduce(a, func, **kwargs):
"""
Internal Function.
Call `func` with `a` as first argument swapping the axes to use extended
axis on functions that don't support it natively.
Returns result and a.shape with axis dims set to 1.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
func : callable
        Reduction function capable of receiving an axis argument.
        It is called with `a` as first argument followed by `kwargs`.
kwargs : keyword arguments
additional keyword arguments to pass to `func`.
Returns
-------
result : tuple
Result of func(a, **kwargs) and a.shape with axis dims set to 1
which can be used to reshape the result to the same shape a ufunc with
keepdims=True would produce.
"""
a = np.asanyarray(a)
axis = kwargs.get('axis', None)
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
try:
axis = operator.index(axis)
if axis >= nd or axis < -nd:
raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
keepdim[axis] = 1
except TypeError:
sax = set()
for x in axis:
if x >= nd or x < -nd:
raise IndexError("axis %d out of bounds (%d)" % (x, nd))
if x in sax:
raise ValueError("duplicate value in axis")
sax.add(x % nd)
keepdim[x] = 1
keep = sax.symmetric_difference(frozenset(range(nd)))
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
else:
keepdim = [1] * a.ndim
r = func(a, **kwargs)
return r, keepdim
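# Editor's note (illustrative sketch, not part of the original module): the
# keepdim list returned above is what lets median() and percentile() emulate
# keepdims=True for tuple axes.  For example, reducing a (3, 4, 5) array over
# axis=(0, 2) with this private helper yields keepdim == [1, 4, 1]:
#
#   >>> a = np.zeros((3, 4, 5))
#   >>> r, k = _ureduce(a, func=np.mean, axis=(0, 2))
#   >>> r.shape, k
#   ((4,), [1, 4, 1])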
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape and buffer length as the expected output, but the
type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve the
contents of the input array. Treat the input as undefined, but it
will probably be fully or partially sorted. Default is False. Note
that, if `overwrite_input` is True and the input is not already an
ndarray, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `a`.
.. versionadded:: 1.9.0
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in which
case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
def _median(a, axis=None, out=None, overwrite_input=False):
    # can't reasonably be implemented in terms of percentile as we have to
# call mean to not break astropy
a = np.asanyarray(a)
if axis is not None and axis >= a.ndim:
raise IndexError(
"axis %d out of bounds (%d)" % (axis, a.ndim))
if overwrite_input:
if axis is None:
part = a.ravel()
sz = part.size
if sz % 2 == 0:
szh = sz // 2
part.partition((szh - 1, szh))
else:
part.partition((sz - 1) // 2)
else:
sz = a.shape[axis]
if sz % 2 == 0:
szh = sz // 2
a.partition((szh - 1, szh), axis=axis)
else:
a.partition((sz - 1) // 2, axis=axis)
part = a
else:
if axis is None:
sz = a.size
else:
sz = a.shape[axis]
if sz % 2 == 0:
part = partition(a, ((sz // 2) - 1, sz // 2), axis=axis)
else:
part = partition(a, (sz - 1) // 2, axis=axis)
if part.shape == ():
# make 0-D arrays work
return part.item()
if axis is None:
axis = 0
indexer = [slice(None)] * part.ndim
index = part.shape[axis] // 2
if part.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(part[indexer], axis=axis, out=out)
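# Editor's note (illustrative sketch, not part of the original module): only
# the middle order statistics are needed, so partition() is used above instead
# of a full sort; for an even-sized input the two central elements are selected
# and averaged:
#
#   >>> np.median([3, 1, 4, 2])   # partitions positions 1 and 2 -> (2 + 3)/2
#   2.5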
def percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute which must be between 0 and 100 inclusive.
axis : int or sequence of int, optional
Axis along which the percentiles are computed. The default (None)
is to compute the percentiles along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
percentile. This will save memory when you do not need to preserve
the contents of the input array. In this case you should not make
any assumptions about the content of the passed in array `a` after
this function completes -- treat it as undefined. Default is False.
Note that, if the `a` input is not already an array this parameter
will have no effect, `a` will be converted to an array internally
regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
.. versionadded:: 1.9.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `a`.
.. versionadded:: 1.9.0
Returns
-------
percentile : scalar or ndarray
If a single percentile `q` is given and axis=None a scalar is
returned. If multiple percentiles `q` are given an array holding
the result is returned. The results are listed in the first axis.
(If `out` is specified, in which case that array is returned
instead). If the input contains integers, or floats of smaller
precision than 64, then the output data-type is float64. Otherwise,
the output data-type is the same as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the q-th percentile of V is the q-th ranked
value in a sorted copy of V. The values and distances of the two
nearest neighbors as well as the `interpolation` parameter will
determine the percentile if the normalized ranking does not match q
exactly. This function is the same as the median if ``q=50``, the same
as the minimum if ``q=0`` and the same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
    >>> np.percentile(a, 50)
    3.5
    >>> np.percentile(a, 50, axis=0)
    array([ 6.5, 4.5, 2.5])
    >>> np.percentile(a, 50, axis=1)
    array([ 7., 2.])
    >>> m = np.percentile(a, 50, axis=0)
    >>> out = np.zeros_like(m)
    >>> np.percentile(a, 50, axis=0, out=out)
    array([ 6.5, 4.5, 2.5])
    >>> m
    array([ 6.5, 4.5, 2.5])
    >>> b = a.copy()
    >>> np.percentile(b, 50, axis=1, overwrite_input=True)
    array([ 7., 2.])
    >>> assert not np.all(a==b)
    >>> b = a.copy()
    >>> np.percentile(b, 50, axis=None, overwrite_input=True)
    3.5
"""
q = array(q, dtype=np.float64, copy=True)
r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims:
if q.ndim == 0:
return r.reshape(k)
else:
return r.reshape([len(q)] + k)
else:
return r
def _percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
a = asarray(a)
if q.ndim == 0:
# Do not allow 0-d arrays because following code fails for scalar
zerod = True
q = q[None]
else:
zerod = False
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.size < 10:
for i in range(q.size):
if q[i] < 0. or q[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
q[i] /= 100.
else:
# faster than any()
if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
raise ValueError("Percentiles must be in the range [0,100]")
q /= 100.
    # prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
else:
ap = a
else:
if axis is None:
ap = a.flatten()
else:
ap = a.copy()
if axis is None:
axis = 0
Nx = ap.shape[axis]
indices = q * (Nx - 1)
# round fractional indices according to interpolation method
if interpolation == 'lower':
indices = floor(indices).astype(intp)
elif interpolation == 'higher':
indices = ceil(indices).astype(intp)
elif interpolation == 'midpoint':
indices = floor(indices) + 0.5
elif interpolation == 'nearest':
indices = around(indices).astype(intp)
elif interpolation == 'linear':
pass # keep index as fraction and interpolate
else:
raise ValueError(
"interpolation can only be 'linear', 'lower' 'higher', "
"'midpoint', or 'nearest'")
if indices.dtype == intp: # take the points along axis
ap.partition(indices, axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
axis = 0
if zerod:
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
else: # weight the points above and below the indices
indices_below = floor(indices).astype(intp)
indices_above = indices_below + 1
indices_above[indices_above > Nx - 1] = Nx - 1
weights_above = indices - indices_below
weights_below = 1.0 - weights_above
weights_shape = [1, ] * ap.ndim
weights_shape[axis] = len(indices)
weights_below.shape = weights_shape
weights_above.shape = weights_shape
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with qth is first
x1 = np.rollaxis(x1, axis, 0)
x2 = np.rollaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
if out is not None:
r = add(x1, x2, out=out)
else:
r = add(x1, x2)
return r
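# --- Editor's sketch (not part of the NumPy source) --------------------------
# Hand-worked illustration of how the 'linear' interpolation above resolves a
# fractional index; all names below are local to this sketch and purely
# illustrative.
def _percentile_linear_sketch():
    xs = [1.0, 2.0, 3.0, 4.0]                # sorted data, Nx = 4
    q = 0.40                                 # 40th percentile, already / 100
    idx = q * (len(xs) - 1)                  # 1.2 -> between positions 1 and 2
    lo = int(idx)
    frac = idx - lo                          # 0.2, weight of the upper neighbour
    return xs[lo] * (1.0 - frac) + xs[lo + 1] * frac   # 2.0*0.8 + 3.0*0.2 == 2.2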
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
        The axis along which to integrate.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
    Image [2]_ illustrates the trapezoidal rule -- y-axis locations of points
will be taken from `y` array, by default x-axis distances between
points will be 1.0, alternatively they can be provided with `x` array
or with `dx` scalar. Return value will be equal to combined area under
the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
try:
ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError:
# Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
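# --- Editor's sketch (not part of the NumPy source) --------------------------
# The composite trapezoidal rule that trapz() vectorises above, written as a
# plain Python loop for the 1-D case; names are local to this sketch.
def _trapz_loop_sketch(y, x):
    total = 0.0
    for i in range(len(y) - 1):
        total += (x[i + 1] - x[i]) * (y[i] + y[i + 1]) / 2.0
    return total
# _trapz_loop_sketch([1, 2, 3], [4, 6, 8]) == 8.0, matching the docstring example.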
#always succeed
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
"""
try:
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
add_docstring(new, doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new, doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
except:
pass
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
        If False, views into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
        For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
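# --- Editor's sketch (not part of the NumPy source) --------------------------
# Shape bookkeeping performed by meshgrid() above for the default 2-D 'xy'
# case: each 1-D input is reshaped so that broadcasting fills the grid.
def _meshgrid_shape_sketch():
    x = np.linspace(0, 1, 3)                   # nx = 3
    y = np.linspace(0, 1, 2)                   # ny = 2
    xv = x.reshape(1, -1) * np.ones((2, 1))    # rows broadcast -> shape (ny, nx)
    yv = y.reshape(-1, 1) * np.ones((1, 3))    # cols broadcast -> shape (ny, nx)
    return xv, yv                              # matches the dense docstring example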
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Notes
-----
Often it is preferable to use a boolean mask. For example:
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
use of `mask`.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
if ndim == 0:
warnings.warn(
"in the future the special handling of scalars will be removed "
"from delete and raise an error", DeprecationWarning)
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
start, stop, step = obj.indices(N)
xr = range(start, stop, step)
numtodel = len(xr)
if numtodel <= 0:
if wrap:
return wrap(arr.copy())
else:
return arr.copy()
# Invert if step is negative:
if step < 0:
step = -step
start = xr[-1]
stop = xr[0] + 1
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
        # copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
keep = ones(stop-start, dtype=bool)
keep[:stop-start:step] = False
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(start, stop)
arr = arr[slobj2]
slobj2[axis] = keep
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
else:
return new
_obj = obj
obj = np.asarray(obj)
# After removing the special handling of booleans and out of
# bounds values, the conversion to the array can be removed.
if obj.dtype == bool:
warnings.warn(
"in the future insert will treat boolean arrays and array-likes "
"as boolean index instead of casting it to integer", FutureWarning)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
obj = obj.item()
if (obj < -N or obj >= N):
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (obj < 0):
obj += N
newshape[axis] -= 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1, None)
new[slobj] = arr[slobj2]
else:
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
if not np.can_cast(obj, intp, 'same_kind'):
# obj.size = 1 special case always failed and would just
# give superfluous warnings.
warnings.warn(
"using a non-integer array as obj in delete will result in an "
"error in the future", DeprecationWarning)
obj = obj.astype(intp)
keep = ones(N, dtype=bool)
# Test if there are out of bound indices, this is deprecated
inside_bounds = (obj < N) & (obj >= -N)
if not inside_bounds.all():
warnings.warn(
"in the future out of bounds indices will raise an error "
"instead of being ignored by `numpy.delete`.",
DeprecationWarning)
obj = obj[inside_bounds]
positive_indices = obj >= 0
if not positive_indices.all():
warnings.warn(
"in the future negative indices will not be ignored by "
"`numpy.delete`.", FutureWarning)
obj = obj[positive_indices]
keep[obj, ] = False
slobj[axis] = keep
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
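# --- Editor's sketch (not part of the NumPy source) --------------------------
# The slice fast path in delete() above is equivalent to building a boolean
# keep-mask, as the Notes section of its docstring suggests; a 1-D illustration:
def _delete_mask_sketch():
    arr = np.arange(12)
    keep = np.ones(arr.shape[0], dtype=bool)
    keep[1:10:2] = False           # same elements as np.delete(arr, np.s_[1:10:2])
    return arr[keep]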
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
.. versionadded:: 1.8.0
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (similar to calling insert multiple
times).
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
`values` should be shaped so that ``arr[...,obj,...] = values``
is legal.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
concatenate : Join a sequence of arrays together.
delete : Delete elements from an array.
Notes
-----
    Note that for higher dimensional inserts `obj=0` behaves very differently
    from `obj=[0]`, just as `arr[:,0,:] = values` is different from
    `arr[:,[0],:] = values`.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
Difference between sequence and scalars:
>>> np.insert(a, [1], [[1],[2],[3]], axis=1)
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
... np.insert(a, [1], [[1],[2],[3]], axis=1))
True
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
else:
if ndim > 0 and (axis < -ndim or axis >= ndim):
raise IndexError(
"axis %i is out of bounds for an array of "
"dimension %i" % (axis, ndim))
if (axis < 0):
axis += ndim
if (ndim == 0):
warnings.warn(
"in the future the special handling of scalars will be removed "
"from insert and raise an error", DeprecationWarning)
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
# turn it into a range object
indices = arange(*obj.indices(N), **{'dtype': intp})
else:
# need to copy obj, because indices will be changed in-place
indices = np.array(obj)
if indices.dtype == bool:
# See also delete
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
"integer", FutureWarning)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
# raise ValueError('boolean array argument obj to insert '
# 'must be one dimensional')
#indices = np.flatnonzero(obj)
elif indices.ndim > 1:
raise ValueError(
"index array argument obj to insert must be one dimensional "
"or scalar")
if indices.size == 1:
index = indices.item()
if index < -N or index > N:
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (index < 0):
index += N
# There are some object array corner cases here, but we cannot avoid
# that:
values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
if indices.ndim == 0:
        # broadcasting is very different here, since a[:,0,:] = ... behaves
        # very differently from a[:,[0],:] = ...! This changes values so that
        # it works like the second case. (here a[:,0:1,:])
values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
numnew = values.shape[axis]
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, index)
new[slobj] = arr[slobj]
slobj[axis] = slice(index, index+numnew)
new[slobj] = values
slobj[axis] = slice(index+numnew, None)
slobj2 = [slice(None)] * ndim
slobj2[axis] = slice(index, None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif indices.size == 0 and not isinstance(obj, np.ndarray):
# Can safely cast the empty list to intp
indices = indices.astype(intp)
if not np.can_cast(indices, intp, 'same_kind'):
warnings.warn(
"using a non-integer array as obj in insert will result in an "
"error in the future", DeprecationWarning)
indices = indices.astype(intp)
indices[indices < 0] += N
numnew = len(indices)
order = indices.argsort(kind='mergesort') # stable sort
indices[order] += np.arange(numnew)
newshape[axis] += numnew
old_mask = ones(newshape[axis], dtype=bool)
old_mask[indices] = False
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = indices
slobj2[axis] = old_mask
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
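# --- Editor's sketch (not part of the NumPy source) --------------------------
# Why insert() shifts the sorted indices with `indices[order] += np.arange(numnew)`
# above: every later insertion point moves one slot to the right for each value
# already inserted.  Hand-worked example mirroring the docstring case
# np.insert([1, 1, 2, 2, 3, 3], [2, 2], [5, 6]) -> [1, 1, 5, 6, 2, 2, 3, 3]:
def _insert_shift_sketch():
    obj = np.array([2, 2])
    order = obj.argsort(kind='mergesort')    # stable sort -> [0, 1]
    obj[order] += np.arange(len(obj))        # -> [2, 3]
    return obj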
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
|
mit
|
0todd0000/rft1d
|
rft1d/examples/val_max_1_onesample_t_1d.py
|
1
|
1235
|
import numpy as np
from matplotlib import pyplot
import rft1d
#(0) Set parameters:
np.random.seed(123456789)
nResponses = 8
nIterations = 2000
nNodes = 101
FWHM = 10.0
### derived parameters:
df = nResponses-1
sqrtN = np.sqrt(nResponses)
#(1) Generate Gaussian 1D fields, compute test stat, store field maximum:
T = []
generator = rft1d.random.Generator1D(nResponses, nNodes, FWHM)
for i in range(nIterations):
y = generator.generate_sample()
t = y.mean(axis=0) / y.std(ddof=1, axis=0) * sqrtN
T.append( t.max() )
T = np.asarray(T)
#(2) Survival functions:
heights = np.linspace(2, 5, 21)
sf = np.array( [ (T>h).mean() for h in heights] )
sfE = rft1d.t.sf(heights, df, nNodes, FWHM) #theoretical
sf0D = rft1d.t.sf0d(heights, df) #theoretical (0D)
#(3) Plot results:
pyplot.close('all')
ax = pyplot.axes()
ax.plot(heights, sf, 'o', label='Simulated')
ax.plot(heights, sfE, '-', label='Theoretical')
ax.plot(heights, sf0D, 'r-', label='Theoretical (0D)')
ax.set_xlabel('$u$', size=20)
ax.set_ylabel('$P (t_\mathrm{max} > u)$', size=20)
ax.legend()
ax.set_title('One-sample t validation (1D)', size=20)
pyplot.show()
|
gpl-3.0
|
mattilyra/scikit-learn
|
examples/calibration/plot_calibration_curve.py
|
113
|
5904
|
"""
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them used for model fitting) and 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: the calibration curve has a sigmoid shape, which is typical of
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.model_selection import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
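# --- Editor's sketch (not part of the original example) ----------------------
# The Brier score reported in the legends above is simply the mean squared
# difference between the predicted probability and the true 0/1 label; a
# hand-rolled version (assuming binary labels encoded as 0/1) for comparison
# with sklearn.metrics.brier_score_loss:
def brier_score_sketch(y_true, prob_pos):
    import numpy as np
    y_true = np.asarray(y_true, dtype=float)
    prob_pos = np.asarray(prob_pos, dtype=float)
    return np.mean((prob_pos - y_true) ** 2)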
|
bsd-3-clause
|
Edgar324/GeoVis
|
weathervis/candidateselector.py
|
1
|
2538
|
import numpy as np
from skimage import segmentation
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
import math
class CandidateSelector:
def __init__(self, data):
self.name = 'Candidate Selector'
self.data = data
self.candidates = []
def GenerateKMeansCandidates(self, candNumber, disWeight):
self.pixelLabelIndex = segmentation.slic(self.data, compactness=disWeight, n_segments=candNumber)
self.reIndexLabels()
def GenerateHierarchicalCandidates(self, candNumber):
tempImg = np.reshape(self.data, (-1, 1))
connectivity = grid_to_graph(*self.data.shape[:2])
ward = AgglomerativeClustering(n_clusters=candNumber, linkage='ward', connectivity=connectivity).fit(tempImg)
self.pixelLabelIndex = np.reshape(ward.labels_, self.data.shape[:2])
self.reIndexLabels()
def reIndexLabels(self):
# Generate the candidate position index
height, width = self.data.shape[:2]
seqIndex = {}
for h in range(0, height):
for w in range(0, width):
currentLabel = self.pixelLabelIndex[h, w]
if (seqIndex.get(currentLabel) == None):
newLabelIndex = {}
newLabelIndex['x'] = w
newLabelIndex['y'] = h
newLabelIndex['nodeNum'] = 1
newLabelIndex['r'] = 0
self.candidates.append(newLabelIndex)
seqIndex[currentLabel] = self.candidates.index(newLabelIndex)
else:
tempIndex = seqIndex[currentLabel]
self.candidates[tempIndex]['x'] += w
self.candidates[tempIndex]['y'] += h
self.candidates[tempIndex]['nodeNum'] += 1
for index in self.candidates:
if (index['nodeNum'] != 0):
index['x'] /= float(index['nodeNum'])
index['y'] /= float(index['nodeNum'])
for h in range(0, height):
for w in range(0, width):
currentLabel = self.pixelLabelIndex[h, w]
tempIndex = seqIndex[currentLabel]
self.candidates[tempIndex]['r'] += math.sqrt(pow(h - self.candidates[tempIndex]['y'], 2) + pow(w - self.candidates[tempIndex]['x'], 2))
for index in self.candidates:
if (index['nodeNum'] != 0):
index['r'] /= float(index['nodeNum'])
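# --- Editor's sketch (not part of the original class) ------------------------
# The per-label centroids accumulated pixel-by-pixel in reIndexLabels() above
# can also be computed with vectorised numpy calls; a minimal sketch, assuming
# `labels` is a 2-D integer label image (such as pixelLabelIndex) with labels
# numbered 0..K-1:
def centroid_sketch(labels):
    ys, xs = np.indices(labels.shape)
    counts = np.bincount(labels.ravel()).astype(float)
    cx = np.bincount(labels.ravel(), weights=xs.ravel()) / counts
    cy = np.bincount(labels.ravel(), weights=ys.ravel()) / counts
    return cx, cy    # mean x and y coordinate of each label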
|
apache-2.0
|
laijingtao/landlab
|
landlab/ca/examples/turbulent_suspension_with_settling_and_bleaching.py
|
2
|
16819
|
#!/usr/env/python
"""
isotropic_turbulent_suspension_with_settling_and_bleaching.py
Example of a continuous-time, stochastic, pair-based cellular automaton model,
which simulates the diffusion of suspended particles in a turbulent fluid.
Particles start with an accumulated luminescence signal L = 1, and are bleached
by exposure to light at a rate that depends on distance below the upper surface.
Written by Greg Tucker, July 2015
"""
from __future__ import print_function # for both python 2 and 3 compatibility
import time
import matplotlib
from pylab import figure, show, clf
from numpy import where, exp, amin
from landlab import RasterModelGrid, ModelParameterDictionary
from landlab.plot.imshow import imshow_node_grid
from landlab.components.cellular_automata.celllab_cts import Transition, CAPlotter
from landlab.components.cellular_automata.oriented_raster_cts import OrientedRasterCTS
class TurbulentSuspensionAndBleachingModel(OrientedRasterCTS):
"""
Example
-------
>>> from six import StringIO
>>> p = StringIO('''
... model_grid_row__count: number of rows in grid
... 4
... model_grid_column__count: number of columns in grid
... 4
... plot_interval: interval for plotting to display, s
... 2.0
... model__run_time: duration of model run, s
... 1.0
... model__report_interval: time interval for reporting progress, real-time seconds
... 1.0e6
... surface_bleaching_time_scale: time scale for OSL bleaching, s
... 2.42
... light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
... 2.0
... ''')
>>> tsbm = TurbulentSuspensionAndBleachingModel(p)
>>> tsbm.node_state
array([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
>>> tsbm.grid.at_node['osl']
array([ 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.,
0., 0., 0.])
>>> tsbm.n_xn
array([0, 1, 1, 0, 0, 1, 1, 0])
>>> tsbm.fluid_surface_height
3.5
"""
def __init__(self, input_stream):
"""
Reads in parameters and initializes the model.
Example
-------
>>> from six import StringIO
>>> p = StringIO('''
... model_grid_row__count: number of rows in grid
... 4
... model_grid_column__count: number of columns in grid
... 4
... plot_interval: interval for plotting to display, s
... 2.0
... model__run_time: duration of model run, s
... 1.0
... model__report_interval: time interval for reporting progress, real-time seconds
... 1.0e6
... surface_bleaching_time_scale: time scale for OSL bleaching, s
... 2.42
... light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
... 2.0
... ''')
>>> tsbm = TurbulentSuspensionAndBleachingModel(p)
>>> tsbm.node_state
array([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
>>> tsbm.grid.at_node['osl']
array([ 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.,
0., 0., 0.])
>>> tsbm.n_xn
array([0, 1, 1, 0, 0, 1, 1, 0])
>>> tsbm.fluid_surface_height
3.5
"""
# Get a source for input parameters.
params = ModelParameterDictionary(input_stream)
# Read user-defined parameters
nr = params.read_int('model_grid_row__count') # number of rows (CSDMS Standard Name [CSN])
nc = params.read_int('model_grid_column__count') # number of cols (CSN)
self.plot_interval = params.read_float('plot_interval') # interval for plotting output, s
self.run_duration = params.read_float('model__run_time') # duration of run, sec (CSN)
self.report_interval = params.read_float('model__report_interval') # report interval, in real-time seconds
self.bleach_T0 = params.read_float('surface_bleaching_time_scale') # time scale for bleaching at fluid surface, s
self.zstar = params.read_float('light_attenuation_length') # length scale for light attenuation in fluid, CELLS
# Derived parameters
self.fluid_surface_height = nr-0.5
# Calculate when we next want to report progress.
self.next_report = time.time() + self.report_interval
# Create grid
mg = RasterModelGrid(nr, nc, 1.0)
# Make the boundaries be walls
mg.set_closed_boundaries_at_grid_edges(True, True, True, True)
# Set up the states and pair transitions.
ns_dict = { 0 : 'fluid', 1 : 'particle' }
xn_list = self.setup_transition_list()
# Create the node-state array and attach it to the grid
node_state_grid = mg.add_zeros('node', 'node_state_map', dtype=int)
# For visual display purposes, set all boundary nodes to fluid
node_state_grid[mg.closed_boundary_nodes] = 0
# Initialize the node-state array: here, the initial condition is a pile of
# resting grains at the bottom of a container.
bottom_rows = where(mg.node_y<0.4*nr)[0]
node_state_grid[bottom_rows] = 1
# Create a data array for bleaching.
# Here, osl=optically stimulated luminescence, normalized to the original
# signal (hence, initially all unity). Over time this signal may get
# bleached out due to exposure to light.
self.osl = mg.add_zeros('node', 'osl')
self.osl[bottom_rows] = 1.0
self.osl_display = mg.add_zeros('node', 'osl_display')
self.osl_display[bottom_rows] = 1.0
# We'll need an array to track the last time any given node was
# updated, so we can figure out the duration of light exposure between
# update events
self.last_update_time = mg.add_zeros('node','last_update_time')
# Call the base class (RasterCTS) init method
super(TurbulentSuspensionAndBleachingModel, \
self).__init__(mg, ns_dict, xn_list, node_state_grid, prop_data=self.osl)
# Set up plotting (if plotting desired)
if self.plot_interval <= self.run_duration:
self.initialize_plotting()
def initialize_plotting(self):
"""
Creates a CA plotter object, sets its colormap, and plots the initial
model state.
"""
# Set up some plotting information
grain = '#5F594D'
bleached_grain = '#CC0000'
fluid = '#D0E4F2'
clist = [fluid,bleached_grain,grain]
my_cmap = matplotlib.colors.ListedColormap(clist)
# Create a CAPlotter object for handling screen display
self.ca_plotter = CAPlotter(self, cmap=my_cmap)
# Plot the initial grid
self.ca_plotter.update_plot()
# Make a colormap for use in showing the bleaching of each grain
clist = [(0.0, (1.0, 1.0, 1.0)), (0.49, (0.8, 0.8, 0.8)), (1.0, (0.0, 0.0, 0.0))]
self.cmap_for_osl = matplotlib.colors.LinearSegmentedColormap.from_list('osl_cmap', clist)
def setup_transition_list(self):
"""
Creates and returns a list of Transition() objects to represent state
transitions for a biased random walk, in which the rate of downward
motion is greater than the rate in the other three directions.
Parameters
----------
(none)
Returns
-------
xn_list : list of Transition objects
List of objects that encode information about the link-state transitions.
Notes
-----
State 0 represents fluid and state 1 represents a particle (such as a
sediment grain, tea leaf, or dissolved heavy particle).
The states and transitions are as follows:
Pair state Transition to Process Rate (cells/s)
========== ============= ======= ==============
0 (0-0) (none) - -
1 (0-1) 2 (1-0) left motion 10.0
2 (1-0) 1 (0-1) right motion 10.0
3 (1-1) (none) - -
4 (0-0) (none) - -
        5 (0-1) 6 (1-0) down motion 10.55
        6 (1-0) 5 (0-1) up motion 9.45
7 (1-1) (none) - -
"""
# Create an empty transition list
xn_list = []
# Append four transitions to the list.
# Note that the arguments to the Transition() object constructor are:
# - Tuple representing starting pair state
# (left cell, right cell, orientation [0=horizontal])
# - Tuple representing new pair state
# (bottom cell, top cell, orientation [1=vertical])
# - Transition rate (cells per time step, in this case 1 sec)
# - Name for transition
# - Flag indicating that the transition involves an exchange of properties
# - Function to be called after each transition, to update a property
# (in this case, to simulate bleaching of the luminescence signal)
xn_list.append( Transition((0,1,0), (1,0,0), 10., 'left motion', True, self.update_bleaching) )
xn_list.append( Transition((1,0,0), (0,1,0), 10., 'right motion', True, self.update_bleaching) )
xn_list.append( Transition((0,1,1), (1,0,1), 10.55, 'down motion', True, self.update_bleaching) )
xn_list.append( Transition((1,0,1), (0,1,1), 9.45, 'up motion', True, self.update_bleaching) )
return xn_list
def bleach_grain(self, node, dt):
"""
Updates the luminescence signal at node.
Example
-------
>>> from six import StringIO
>>> p = StringIO('''
... model_grid_row__count: number of rows in grid
... 10
... model_grid_column__count: number of columns in grid
... 3
... plot_interval: interval for plotting to display, s
... 2.0
... model__run_time: duration of model run, s
... 1.0
... model__report_interval: time interval for reporting progress, real-time seconds
... 1.0e6
... surface_bleaching_time_scale: time scale for OSL bleaching, s
... 2.42
... light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
... 6.5
... ''')
>>> tsbm = TurbulentSuspensionAndBleachingModel(p)
>>> tsbm.bleach_grain(10, 1.0)
>>> int(tsbm.prop_data[tsbm.propid[10]]*1000)
858
"""
depth = self.fluid_surface_height - self.grid.node_y[node]
T_bleach = self.bleach_T0*exp( depth/self.zstar)
self.prop_data[self.propid[node]] *= exp( -dt/T_bleach )
def update_bleaching(self, ca_unused, node1, node2, time_now):
"""
Updates the luminescence signal at a pair of nodes that have just
undergone a transition, if either or both nodes is a grain.
Example
-------
>>> from six import StringIO
>>> p = StringIO('''
... model_grid_row__count: number of rows in grid
... 10
... model_grid_column__count: number of columns in grid
... 3
... plot_interval: interval for plotting to display, s
... 2.0
... model__run_time: duration of model run, s
... 1.0
... model__report_interval: time interval for reporting progress, real-time seconds
... 1.0e6
... surface_bleaching_time_scale: time scale for OSL bleaching, s
... 2.42
... light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
... 6.5
... ''')
>>> tsbm = TurbulentSuspensionAndBleachingModel(p)
>>> tsbm.update_bleaching(tsbm, 10, 13, 1.0)
>>> int(tsbm.prop_data[tsbm.propid[10]]*1000)
858
>>> tsbm.prop_data[tsbm.propid[13]]
0.0
"""
if self.node_state[node1]==1:
dt = time_now - self.last_update_time[self.propid[node1]]
self.bleach_grain(node1, dt)
self.last_update_time[self.propid[node1]] = time_now
if self.node_state[node2]==1:
dt = time_now - self.last_update_time[self.propid[node2]]
self.bleach_grain(node2, dt)
self.last_update_time[self.propid[node2]] = time_now
def synchronize_bleaching(self, sync_time):
"""
Brings all nodes up to the same time, sync_time, by applying bleaching
up to this time, and updating last_update_time.
Notes
-----
In a CellLab-CTS model, the "time" is usually different for each node:
some will have only just recently undergone a transition and had their
properties (in this case, OSL bleaching) updated, while others will
have last been updated a long time ago, and some may never have had a
transition. If we want to plot the properties at a consistent time, we
need to bring all node properties (again, in this case, OSL) up to
date. This method does so.
We multiply elapsed time (between last update and "sync time") by
the node state, because we only want to update the solid particles---
because the state of a particle is 1 and fluid 0, this multiplication
masks out the fluid nodes.
We don't call bleach_grain(), because we want to take advantage of
numpy array operations rather than calling a method for each node.
Example
-------
>>> from six import StringIO
>>> p = StringIO('''
... model_grid_row__count: number of rows in grid
... 10
... model_grid_column__count: number of columns in grid
... 3
... plot_interval: interval for plotting to display, s
... 2.0
... model__run_time: duration of model run, s
... 1.0
... model__report_interval: time interval for reporting progress, real-time seconds
... 1.0e6
... surface_bleaching_time_scale: time scale for OSL bleaching, s
... 2.42
... light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
... 6.5
... ''')
>>> tsbm = TurbulentSuspensionAndBleachingModel(p)
>>> tsbm.synchronize_bleaching(1.0)
>>> int(tsbm.osl[10]*100000)
85897
"""
dt = (sync_time - self.last_update_time[self.propid])*self.node_state
assert (amin(dt)>=0.0), 'sync_time must be >= 0 everywhere'
depth = self.fluid_surface_height - self.grid.node_y
T_bleach = self.bleach_T0*exp( depth/self.zstar)
self.prop_data[self.propid] *= exp( -dt/T_bleach )
self.last_update_time[self.propid] = sync_time*self.node_state
def go(self):
"""
Runs the model.
"""
# RUN
while self.current_time < self.run_duration:
# Once in a while, print out simulation and real time to let the user
# know that the sim is running ok
current_real_time = time.time()
if current_real_time >= self.next_report:
print('Current sim time',self.current_time,'(',100*self.current_time/self.run_duration,'%)')
self.next_report = current_real_time + self.report_interval
# Run the model forward in time until the next output step
self.run(self.current_time+self.plot_interval, self.node_state,
plot_each_transition=False)
self.current_time += self.plot_interval
self.synchronize_bleaching(self.current_time)
if self.plot_interval <= self.run_duration:
# Plot the current grid
self.ca_plotter.update_plot()
# Display the OSL content of grains
figure(3)
clf()
self.osl_display[:] = self.osl[self.propid]+self.node_state
imshow_node_grid(self.grid, 'osl_display', limits=(0.0, 2.0),
cmap=self.cmap_for_osl)
show()
figure(1)
def finalize(self):
# FINALIZE
# Plot
self.ca_plotter.finalize()
# If user runs this file, activate the main() function.
if __name__ == "__main__":
# Parse command-line argument, if any
import sys
if len(sys.argv)>1:
input_file_name = sys.argv[1]
else:
input_file_name = 'tsbm_inputs.txt'
# Instantiate the model
ca_model = TurbulentSuspensionAndBleachingModel(input_file_name)
# Run the model
ca_model.go()
# Clean up
ca_model.finalize()
|
mit
|
superDross/MethyCoverageParser
|
scripts/reshapers/cpg_amplicon_coverage.py
|
1
|
4915
|
# created by David Ross
import pandas as pd
import argparse
import os
from append_probe_info import add_probe
def CpG_cov(cov_dir, field):
''' Get the methylated/unmethylated coverage for all CpG sites
from all bismark2bedgraph coverage files in a given directory and output
into a Pandas DataFrame.
Args:
cov_dir: dir containing BME coverage files
field: the column values to parse into cells (meth_cov or unmeth_cov)
'''
if field not in ('meth_cov', 'unmeth_cov'):
raise IOError("field must be meth_cov or unmeth_cov not {}".format(field))
cov_files = sorted([f for f in os.listdir(cov_dir) if f.endswith('cov')])
print("NOTE: the following BME coverage files will be combined:\n"+", ".join(cov_files))
df_store = []
for cov_file in cov_files:
sam = cov_file.split("/")[-1].split(".")[0]
df = pd.read_csv(cov_dir+cov_file, sep="\t", header=None)
df.columns = ['Chromosome', 'Position', 'pos_end', 'meth_percent', 'meth_cov', 'unmeth_cov']
df['coverage'] = df['meth_cov'] + df['unmeth_cov']
df = df[['Chromosome', 'Position', field]]
df.rename(columns={field: sam}, inplace=True)
df['methy_status'] = field
        # setting index and transposing is required for proper concatenation
df_store.append(df.set_index(['Chromosome', 'Position', 'methy_status']).T)
cpg_df = pd.concat(df_store).T
return cpg_df
def amplicon_cpg_coverage(cov_df, amplicon):
    ''' Calculate the total CpG coverage across all given amplicons and
create an amplicon CpG file.
Args:
cov_df: CpG_cov() outputted DataFrame
        amplicon: BED-like file describing primer start, end positions
and strand
'''
# store all amplicon cpg coverage values
series_store = []
with open(amplicon) as amp:
cov_df = cov_df.reset_index()
for line in amp.readlines():
chrom, start, end, strand = line.strip("\n").split("\t")
# filter for CpG sites within the amplicon
filtered = cov_df[(cov_df['Chromosome'] == str(chrom)) & (cov_df['Position'] >= int(start)) &
(cov_df['Position'] <= int(end))]
# sum all the filtered CpG sites coverage values together to get a summarised series
headers = [x for x in filtered if x not in ('Chromosome', 'Position', 'methy_status')]
sum_filtered = filtered[headers].sum()
# create a series with amplicon & methylation information and concat with summarised CpG coverage series
amp_range = pd.Series([chrom, start, end, strand, cov_df['methy_status'].unique()[0]],
index=['Chromosome', 'Start', 'End', 'Strand', 'Methylation Status'])
amp_series = amp_range.append(sum_filtered)
series_store.append(amp_series)
# concat all the series together and set index
df = pd.concat(series_store, axis=1).T
df = df.set_index(['Chromosome', 'Start', 'End', 'Strand', 'Methylation Status']).sort_index()
return df
def main(cov_dir, amplicon, out, probe_file=None):
    # produce a dataframe detailing the coverage for all meth/unmeth CpG sites from bismark2bedGraph coverage files
meth_site_cov = CpG_cov(cov_dir, 'meth_cov')
unmeth_site_cov = CpG_cov(cov_dir, 'unmeth_cov')
# get the total CpG site methylation coverage over a set of amplicons and transform into dataframes
meth_amplicon_cov = amplicon_cpg_coverage(meth_site_cov, amplicon)
unmeth_amplicon_cov = amplicon_cpg_coverage(unmeth_site_cov, amplicon)
    # concatenate both the methylated and unmethylated counterparts
com_cov = pd.concat([meth_amplicon_cov, unmeth_amplicon_cov]).sort_index()
# add probe name to the amplicons
if probe_file:
com_cov = add_probe(com_cov, probe_file)
com_cov = com_cov.set_index(['Probe', 'Chromosome', 'Start', 'End', 'Strand', 'Methylation Status'])
com_cov.to_csv(out, sep="\t")
return com_cov
def get_parser():
    parser = argparse.ArgumentParser(description="Parses bedgraph coverage files to create a dataframe of samples' CpG methylated/unmethylated coverage at given genomic ranges of interest")
parser.add_argument('-b', '--bedgraph_dir', help='dir containing bedgraph coverage files')
parser.add_argument('-o', '--output', help='output file name')
parser.add_argument('-a', '--amplicon', help='amplicon bed file containing primer positions and strand')
parser.add_argument('-p', '--probe', nargs='?', default=None, help='probe list to filter positions for')
return parser
def cli():
parser = get_parser()
args = vars(parser.parse_args())
main(args['bedgraph_dir'], args['amplicon'], args['output'], args['probe'])
if __name__ == '__main__':
cli()
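# --- Editor's note (not part of the original script) -------------------------
# Example invocation; the file names below are hypothetical and shown only for
# illustration:
#   python cpg_amplicon_coverage.py -b bedgraph_cov/ -a amplicons.bed \
#       -o CpG_amplicon_coverage.tsv -p probes.txt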
|
gpl-3.0
|
albugrimenko/Python_Pieces
|
tools/setup.py
|
1
|
1449
|
"""
Basic check for all required libraries.
NOTE: must be modified for the needs of each specific project.
Scikit-learn requires:
- Python (>= 2.6 or >= 3.3)
- NumPy (>= 1.6.1)
- SciPy (>= 0.9)
Sources:
http://scikit-learn.org/stable/developers/advanced_installation.html
    http://www.lfd.uci.edu/~gohlke/pythonlibs/
http://conda.pydata.org/miniconda.html
http://winpython.github.io/ - Windows installation.
@author: Alex Bugrimenko
"""
import sys
import importlib
def check_lib(lib_name):
""" Checks if a specified library is installed. """
res = False
print("... checking for {0} ...".format(lib_name))
try:
# import lib_name
importlib.import_module(lib_name)
print("+++ {0} ... OK".format(lib_name))
res = True
except ImportError:
print("--! ERROR: {0} is MISSING:\n--! {1}".format(lib_name, sys.exc_info()[1]))
except:
print("--! ERROR:", sys.exc_info())
return res
def check_required_libs():
""" Checks all required libraries """
check_lib("json") # used in config_json
check_lib("time")
check_lib("datetime")
check_lib("dateutil")
check_lib("unittest")
# ML libs
# check_lib("pickle")
# check_lib("numpy")
# check_lib("scipy")
# check_lib("sklearn")
# check_lib("pandas")
if __name__ == "__main__":
check_required_libs()
|
mit
|
mjudsp/Tsallis
|
sklearn/datasets/tests/test_20news.py
|
280
|
3045
|
"""Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
|
bsd-3-clause
|
shiinoandra/wavegano
|
Program/Wavegano/Wavegano/Helper.py
|
1
|
2298
|
import numpy
import Wave
from scipy.io.wavfile import read
from scipy.io.wavfile import write
import matplotlib.pyplot as pl
import operation as op
import binascii
import math
class WavIO:
@staticmethod
def open(path):
wav = read(path)
waveObj = Wave.Wave("sample")
waveObj.bitrate = wav[0]
samples = wav[1]
        samples = numpy.array(samples, dtype=numpy.uint16)
#for i in range(len(samples)):
# samples[i] = samples[i] + 32767
waveObj.samples = samples
waveObj.max = samples.max
waveObj.min = samples.min
waveObj.shape = samples.shape
waveObj.type = samples.dtype
return waveObj
@staticmethod
def write(path,wavObj):
rate = wavObj.bitrate
write(path+wavObj.name ,rate,wavObj.samples)
class payloadIO:
@staticmethod
def open(path):
message = open(path).read()
return message
@staticmethod
def write(path,message):
f = open(path,mode='w')
f.write(message)
f.close()
class analytics:
@staticmethod
def calculatePSNR(original_wave,corrupted_wave):
samplelen = len(original_wave)
sum = 0
for i in range(samplelen):
sum+=(math.pow((original_wave[i] - corrupted_wave[i]),2))
MSE = float(sum)/float(samplelen)
if(MSE == 0):
return -1
else:
max =65535 #16bit
PSNR = 10*(math.log10((math.pow(max,2)/MSE)))
return PSNR
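# --- Editor's sketch (not part of the original module) -----------------------
# Worked example of the PSNR formula used in calculatePSNR above, for 16-bit
# samples (max = 65535); the MSE value is made up purely for illustration.
def psnr_sketch():
    MSE = 4.0
    max_val = 65535
    return 10 * math.log10(math.pow(max_val, 2) / MSE)   # ~90.3 dB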
#w = WavIO.open("D:\coba.wav")
#w.print_info()
#tes = [3732,1664,12523,4482,121,17543,2243,6742,9,10,11,12,13,14,15,16,20,11,33,14,12,10,10,1004,1012,1001,1000,1000,1000,1000,1013,999,998,990,992,991]
#bin = op.operation.numToBinary(tes)
#secret = payloadIO.open("D:\secret.txt")
#secret = op.operation.stringToBinary(secret)
#result = op.operation.RDE_Array(bin,secret,20)
#intM1 = result[0]
#reducedMap = result[2]
#locMap1 = result[3]
#op.operation.inv_RDE_Array(intM1,locMap1,reducedMap)
#print(result[0])
#print(result[1])
#print(result[2])
#bigits = op.operation.makeBigit(bin)
#print(bigits[10])
#print(bigits.shape)
#pl.figure(1)
#pl.plot(w.samples)
#pl.ylabel = "Amplitudo"
#pl.xlabel = "Waktu"
#pl.title = "coba.wav"
#pl.show()
|
mit
|
boland1992/seissuite_iran
|
bin/new_workflow.py
|
1
|
35858
|
#!/usr/bin/python -u
"""
[Advice: run this script using python with unbuffered output:
`python -u 01_timeseries_process.py`]
This script reads seismic waveform data from a set of stations, and
calculates the cross-correlations between all pairs of stations. The
data (in miniseed format) must be located in folder *MSEED_DIR*. The
stations information (coordinates, instrument response) can be read
from dataless seed files (if *USE_DATALESSPAZ* = True) located in
folder *DATALESS_DIR*, and/or stationXML files (if *USE_STATIONXML* =
True) located in folder *STATIONXML_DIR*. Note that two different
stations MUST HAVE DIFFERENT NAMES, even if they do not belong to
the same network. Also, one given station cannot have several
sets of coordinates: if so, it will be skipped.
In the current version of the program, miniseed files MUST be
organized inside their directory as:
<year>-<month>/<network>.<station>.<channel>.mseed, e.g.:
1988-10/BL.JFOB.BHZ.mseed
So, there is one sub-directory per month, and inside it, one miniseed
file per month and per station.
The implemented algorithm follows the lines of Bensen et al.,
"Processing seismic ambient noise data to obtain reliable broad-band
surface wave dispersion measurements", Geophys. J. Int. (2007).
The procedure consists in stacking daily cross-correlations between
pairs of stations, from *FIRSTDAY* to *LASTDAY* and, in each given day,
rejecting stations whose data fill is < *MINFILL*. Define a subset of
stations to cross-correlate in *CROSSCORR_STATIONS_SUBSET* (or let it
empty to cross-correlate all stations). Define a list of locations to
skip in *CROSSCORR_SKIPLOCS*, if any. The cross-correlations are
calculated between -/+ *CROSSCORR_TMAX* seconds.
Several pre-processing steps are applied to the daily seismic waveform
data, before the daily cross-correlation is calculated and stacked:
(1) removal of the instrument response, the mean and the trend;
(2) band-pass filter between *PERIODMIN* and *PERIODMAX* sec
(3) down-sampling to sampling step = *PERIOD_RESAMPLE* sec
(4) time-normalization:
- if *ONEBIT_NORM* = False, normalization of the signal by its
(smoothed) absolute amplitude in the earthquake period band,
      defined as *PERIODMIN_EARTHQUAKE* - *PERIODMAX_EARTHQUAKE* sec.
The smoothing window is *PERIODMAX_EARTHQUAKE* / 2;
    - if *ONEBIT_NORM* = True, one-bit normalization, wherein
only the sign of the signal is kept (+1 or -1);
(5) spectral whitening of the Fourier amplitude spectrum: the Fourier
amplitude spectrum of the signal is divided by a smoothed version
    of itself. The smoothing window is *WINDOW_FREQ*.
Note that all the parameters mentioned above are defined in the
configuration file.
When all the cross-correlations are calculated, the script exports
several files in dir *CROSS*
"""
import os
import warnings
import datetime as dt
import itertools as it
import pickle
import obspy.signal.cross_correlation
import time
import glob
import sqlite3 as lite
import shutil
import numpy as np
import matplotlib.pyplot as plt
from obspy.core import Trace
# set epoch timestamp
epoch = dt.datetime(1970, 1, 1)
total_verbose = True
psd = False
# DECLUSTER STATIONS!
# remove stations that are too close to one another (set by degree radius!)
#from seissuite.spacing.search_station import Coordinates
#import matplotlib.pyplot as plt
#import numpy as np
# turn on multiprocessing to get one merged trace per station?
# to preprocess trace? to stack cross-correlations?
MULTIPROCESSING = {'merge trace': True,
'process trace': True,
'cross-corr': True}
# how many concurrent processes? (set None to let multiprocessing module decide)
NB_PROCESSES = None
if any(MULTIPROCESSING.values()):
import multiprocessing as mp
# create a list of configuration files that will be iterated over!
# must be 1 or more
from seissuite.ant.psconfig import (create_config_list, run_config,
remove_config)
config_list = create_config_list()
config_file = config_list[0]
total_time0 = dt.datetime.now()
#for config_file in config_list:
for i in range(0, 100):
# global variables MUST be defined
# with the function in the seissuite.ant.psconfig module
run_config(config_file)
from seissuite.ant import (pscrosscorr, psstation, pspreprocess, pserrors,
psstationSQL)
    # import CONFIG class initialised in ./configs/tmp_config.pickle
config_pickle = 'configs/tmp_config.pickle'
f = open(name=config_pickle, mode='rb')
CONFIG = pickle.load(f)
f.close()
# import variables from initialised CONFIG class.
MSEED_DIR = CONFIG.MSEED_DIR
DATABASE_DIR = CONFIG.DATABASE_DIR
DATALESS_DIR = CONFIG.DATALESS_DIR
STATIONXML_DIR = CONFIG.STATIONXML_DIR
CROSSCORR_DIR = CONFIG.CROSSCORR_DIR
USE_DATALESSPAZ = CONFIG.USE_DATALESSPAZ
USE_STATIONXML = CONFIG.USE_STATIONXML
CROSSCORR_STATIONS_SUBSET = CONFIG.CROSSCORR_STATIONS_SUBSET
CROSSCORR_SKIPLOCS = CONFIG.CROSSCORR_SKIPLOCS
FIRSTDAY = CONFIG.FIRSTDAY
LASTDAY = CONFIG.LASTDAY
MINFILL = CONFIG.MINFILL
FREQMIN = CONFIG.FREQMIN
FREQMAX = CONFIG.FREQMAX
CORNERS = CONFIG.CORNERS
ZEROPHASE = CONFIG.ZEROPHASE
PERIOD_RESAMPLE = CONFIG.PERIOD_RESAMPLE
ONEBIT_NORM = CONFIG.ONEBIT_NORM
FREQMIN_EARTHQUAKE = CONFIG.FREQMIN_EARTHQUAKE
FREQMAX_EARTHQUAKE = CONFIG.FREQMAX_EARTHQUAKE
WINDOW_TIME = CONFIG.WINDOW_TIME
WINDOW_FREQ = CONFIG.WINDOW_FREQ
XCORR_INTERVAL = CONFIG.XCORR_INTERVAL
CROSSCORR_TMAX = CONFIG.CROSSCORR_TMAX
PLOT_CLASSIC = CONFIG.PLOT_CLASSIC
PLOT_DISTANCE = CONFIG.PLOT_DISTANCE
MAX_DISTANCE = CONFIG.MAX_DISTANCE
RANDOM_STACK = CONFIG.RANDOM_STACK
RESP_REMOVE = CONFIG.RESP_REMOVE
#FULL_COMB = CONFIG.FULL_COMB
# initialise the required databases if they haven't already been.
    # if the two SQL databases do not yet exist, create them!
TIMELINE_DB = os.path.join(DATABASE_DIR, 'timeline.db')
RESP_DB = os.path.join(DATABASE_DIR, 'response.db')
# RESP_DB = os.path.join(DATABASE_DIR, 'response.db')
# if not os.path.exists(RESP_DB):
# initialise response database for use with automated data selection!
# lite.connect(RESP_DB)
# from seissuite.database import response_database
print TIMELINE_DB
if not os.path.exists(RESP_DB):
try:
lite.connect(RESP_DB)
print "\nCreating response database. Please be patient ... "
from seissuite.database import response_database
except:
print "Response database could not be initialised ... "
if not os.path.exists(TIMELINE_DB):
# initialise timeline database to help the application find files!
lite.connect(TIMELINE_DB)
print "\nCreating timeline database. Please be patient ... "
from seissuite.database import create_database
if psd:
import powerdensity
print "\nProcessing parameters:"
print "- dir of miniseed data: " + MSEED_DIR
print "- dir of dataless seed data: " + DATALESS_DIR
print "- dir of stationXML data: " + STATIONXML_DIR
print "- output dir: " + CROSSCORR_DIR
print "- cross-correlation length (mins): " + str(XCORR_INTERVAL)
print "- cross-correlation maximum time interval (s): " + str(CROSSCORR_TMAX)
print "- band-pass: {:.1f}-{:.1f} s".format(1.0 / FREQMAX, 1.0 / FREQMIN)
if ONEBIT_NORM:
print "- normalization in time-domain: one-bit normalization"
else:
s = ("- normalisation in time-domain: "
"running normalisation in earthquake band ({:.1f}-{:.1f} s)")
print s.format(1.0 / FREQMAX_EARTHQUAKE, 1.0 / FREQMIN_EARTHQUAKE)
fmt = '%d/%m/%Y'
s = "- cross-correlation will be stacked between {}-{}"
print s.format(FIRSTDAY.strftime(fmt), LASTDAY.strftime(fmt))
subset = CROSSCORR_STATIONS_SUBSET
if subset:
print " for stations: {}".format(', '.join(subset))
print
# Initializing collection of cross-correlations
xc = pscrosscorr.CrossCorrelationCollection()
#create a metadata list, may need dictionary based on how much info required
metadata = []
#ask if system has crashed or stopped before another process was finished?
print "\nScanning for partial pickle cross-correlation files ... "
#maybe create pause statement for interesting load menu.
# loading cross-correlations (looking for *.part.pickle files in folders in
#in dir *CROSSCORR_DIR*)
folder_list = sorted(glob.glob(os.path.join(CROSSCORR_DIR, '*')))
pickle_list = []
index = 0 #creating index for call
for folder in folder_list:
#check to see if there are any pickle files in the xcorr time folder
if len(glob.glob(os.path.join(folder, '*.part.pickle'))) < 1:
#print("There are no .pickle files in this folder. Skipping ...")
continue
else:
#append name of pickle file path location string to pickle_list
pickle_list.append(glob.glob(os.path.join(folder, \
'*.part.pickle'))[0])
if len(pickle_list) < 1:
print("\nThere are no partial pickle files to begin again from.")
print("\nThe program will start from the beginning")
res = ""
else:
print "\nPlease choose a file to begin again from, or a combination thereof."
print "Else hit enter to continue anew"
#print combinations of partial pickle files available
print '\n0 - All except backups (*~)'
print '\n'.join('{} - {}'.format(i + 1, f.split('/')[-2])
for i, f in enumerate(pickle_list))
#change folder_list to pickle_list if this gives problems
#res = False#raw_input('\n')
res = ''
#res = raw_input('\n')
#IF LIST INDEX OUT OF RANGE START PROGRAM ALSO
    #if beginning again, reset time-series intervals to where the selected
# .part.pickle file left off!
if not res:
# ========================================
#set output file name as normal
# ========================================
time_string = str(time.strftime("%d.%m.%Y") + "-" + time.strftime("%X"))
responsefrom = []
if USE_DATALESSPAZ:
responsefrom.append('datalesspaz')
if USE_STATIONXML:
responsefrom.append('xmlresponse')
OUTBASENAME_PARTS = [
'XCORR-STACK',
'-'.join(s for s in CROSSCORR_STATIONS_SUBSET) \
if CROSSCORR_STATIONS_SUBSET else None,
'{}-{}'.format(FIRSTDAY.strftime("%d.%m.%Y"),
LASTDAY.strftime("%d.%m.%Y")),
'1bitnorm' if ONEBIT_NORM else None,
'+'.join(responsefrom)
]
OUTFILESNAME = '_'.join(p for p in OUTBASENAME_PARTS if p)
OUTFILESPATH = os.path.join(CROSSCORR_DIR, time_string, OUTFILESNAME)
OUTFOLDERS = os.path.join(CROSSCORR_DIR,
time_string,
'XCORR_PLOTS')
OUT_SNR = os.path.join(CROSSCORR_DIR, time_string, 'SNR_PLOTS')
#create unique folder in CROSS output folder named by the present time.
        if not os.path.exists(OUTFOLDERS):
            os.makedirs(OUTFOLDERS)
        if not os.path.exists(OUT_SNR):
            os.makedirs(OUT_SNR)
# copy configuration file to output so parameters are known for each run
OUTCONFIG = os.path.join(CROSSCORR_DIR, time_string,
os.path.basename(config_file))
print 'Copying configuration file to output directory ... '
shutil.copy(config_file, OUTCONFIG)
METADATA_PATH = '{}metadata.pickle'.format(OUTFILESPATH.\
replace(os.path.basename(OUTFILESPATH), ""))
else:
# ========================================
#reset time as previous time, reset output paths as previous path name
#reset cross-correlation dictionaries
# ========================================
print
PART_PICKLE = pickle_list[int(res)-1]
OUTFILESPATH = PART_PICKLE[:-12]
out_basename = os.path.basename(OUTFILESPATH)
print "Opening {} partial file for restart ... ".format(out_basename)
# re-initialising .part.pickle collection of cross-correlations
xc = pscrosscorr.load_pickled_xcorr(PART_PICKLE)
for key in xc.keys():
for key2 in xc[key].keys():
#help(xc[key][key2])
#print xc[key][key2].endday
a=5
#most recent day last endday of list
#read in metadata to find latest time slot. Then assign this to FIRSTDAY
METADATA_PATH = '{}metadata.pickle'.format(OUTFILESPATH.\
replace(os.path.basename(OUTFILESPATH), ""))
metadata = pscrosscorr.load_pickled_xcorr(METADATA_PATH)
#print "metadata: ", metadata[-5:]
#re-assign FIRSTDAY variable to where the data was cut off
#del metadata[-1]
FIRSTDAY = metadata[len(metadata) - 1] #+ \
#dt.timedelta(minutes=XCORR_INTERVAL)
# FIND RESTART DATE FROM PARTIAL PICKLE FILE, NOT THE METADATA PICKLE
# ============
# Main program
# ============
# Reading inventories in dataless seed and/or StationXML files
dataless_inventories = []
if USE_DATALESSPAZ:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
dataless_inventories = psstationSQL.get_dataless_inventories(DATALESS_DIR,
verbose=False)
xml_inventories = []
if USE_STATIONXML:
xml_inventories = psstationSQL.get_stationxml_inventories(STATIONXML_DIR,
verbose=False)
# Getting list of stations
#stations, subdir_len = psstation.get_stations(mseed_dir=MSEED_DIR,
# xml_inventories=xml_inventories,
# dataless_inventories=dataless_inventories,
# startday=FIRSTDAY,
# endday=LASTDAY,
# verbose=False)
#connect SQL database
SQL_db = os.path.join(DATABASE_DIR, 'timeline.db')
stations, subdir_len = psstationSQL.get_stationsSQL(SQL_db,
xml_inventories=xml_inventories,
dataless_inventories=dataless_inventories,
startday=FIRSTDAY,
endday=LASTDAY,
verbose=False)
stat_coords = np.asarray([station.coord for station in stations])
DECLUSTER = False
#if DECLUSTER:
# stat_coords = np.asarray([station.coord for station in stations])
# COORDS = Coordinates(input_list=stat_coords)
# declustered_coords = COORDS.decluster(degree_dist=0.1)
# stations = [station for station in stations if
# station.coord in declustered_coords]
# Loop on time interval
#number of time steps
N = int(((LASTDAY - FIRSTDAY).days + 1)*60*24 / XCORR_INTERVAL)
dates = [FIRSTDAY + dt.timedelta(minutes=i) for i in \
[j*XCORR_INTERVAL for j in range(N)]]
if RANDOM_STACK:
#print dates
# randomly shuffle dates
#dates = np.array(dates)
np.random.shuffle(dates)
#print dates
#begin = raw_input("\nPress enter to begin the program ")
# initialise preprocess class: METHOD - Bensen et al. (2007)
Preprocess = pspreprocess.Preprocess(FREQMIN, FREQMAX,
FREQMIN_EARTHQUAKE,
FREQMAX_EARTHQUAKE,
CORNERS, ZEROPHASE,
PERIOD_RESAMPLE,
WINDOW_TIME,
WINDOW_FREQ,
ONEBIT_NORM)
#loop on time-series. Date now represents XCORR_INTERVAL long time intervals
counter = 0
# only process the first 5% of the data to understand what to process
# further!
process_number = len(dates) / 20
if process_number < 34:
print "There is not enough information to process new workflow."
raise Exception("Either increase the number of days of raw data or decrease the cross-correlation interval.")
else:
process_number = int(process_number)
for date in dates[:process_number]:
print date
loop_time0 = dt.datetime.now()
print "\nProcessing data for date {} with a {} minute cross-correlation\
time-interval between times: {} and {}".format(date.date(), \
int(XCORR_INTERVAL) , date.time(), \
(date + dt.timedelta(minutes=XCORR_INTERVAL)).time())
iterate_stations = sorted(sta for sta in stations)
# =====================================================================
# check iterate stations have a file in the SQL database
# =====================================================================
# connect the database
conn = lite.connect(SQL_db)
# create cursor object
c = conn.cursor()
# convert to UTC timestamp to search in SQL database
search_start = (date - dt.timedelta(minutes=1) - epoch).total_seconds()
search_end = (date + dt.timedelta(minutes=XCORR_INTERVAL+1) - epoch).total_seconds()
# check if files have data within the time frame search_end-search_start
populated_stations = c.execute('SELECT station FROM file_extrema WHERE \
starttime <= ? AND endtime >= ?', (search_start, search_end))
populated_stations = list(it.chain(*list(populated_stations.fetchall())))
# filter stations with no data for the given time period of this loop!
        for stat in list(iterate_stations):  # iterate over a copy: items are removed below
stat_code = unicode('{}.{}.{}'.format(stat.network,
stat.name,
stat.channel))
if stat_code not in populated_stations:
iterate_stations.remove(stat)
# close timeline.db database
conn.close()
iterate_stations = iterate_stations[1:]
# =====================================================================
# =====================================================================
# subset if stations (if provided)
if CROSSCORR_STATIONS_SUBSET:
iterate_stations = [sta for sta in iterate_stations
if sta.name in CROSSCORR_STATIONS_SUBSET]
# =================
# processing traces
# =================
# =============================================================
# preparing functions that get one merged trace per station
# and pre-process trace, ready to be parallelized (if required)
# =============================================================
def get_merged_trace(station):
"""
Preparing func that returns one trace from selected station,
at current date. Function is ready to be parallelized.
"""
try:
trace = Preprocess.get_merged_trace(station=station,
date=date,
xcorr_interval=XCORR_INTERVAL,
skiplocs=CROSSCORR_SKIPLOCS,
minfill=MINFILL)
#plt.figure()
#plt.plot(trace.data)
#plt.show()
#plt.clf()
if total_verbose:
msg = 'merged'
print '{}.{} [{}] '.format(trace.stats.network,
trace.stats.station,
msg),
errmsg = None
except pserrors.CannotPreprocess as err:
# cannot preprocess if no trace or daily fill < *minfill*
trace = None
errmsg = '{}: skipping'.format(err)
except Exception as err:
# unhandled exception!
trace = None
errmsg = 'Unhandled error: {}'.format(err)
if errmsg:
# printing error message
if total_verbose:
print '{}.{} [{}] '.format(station.network,
station.name, errmsg),
return trace
def preprocessed_trace((trace, response)):
"""
Preparing func that returns processed trace: processing includes
removal of instrumental response, band-pass filtering, demeaning,
detrending, downsampling, time-normalization and spectral whitening
(see pscrosscorr.preprocess_trace()'s doc)
Function is ready to be parallelized.
"""
#if not trace or response is False:
# return
if not type(trace) is Trace:
print trace
#plt.figure()
#plt.plot(trace)
#plt.show()
#quit()
return
try:
Preprocess.preprocess_trace(trace=trace, paz=response,
verbose=False)
msg = 'ok'
if total_verbose:
print '{}.{} [{}] '.format(trace.stats.network,
trace.stats.station,
msg),
except pserrors.CannotPreprocess as err:
# cannot preprocess if no instrument response was found,
# trace data are not consistent etc. (see function's doc)
trace = None
print(err)
print 'skipping'
except Exception as err:
# unhandled exception!
trace = None
print(err)
print 'skipping'
# printing output (error or ok) message
# although processing is performed in-place, trace is returned
# in order to get it back after multi-processing
return trace
# ====================================
# getting one merged trace per station
# ====================================
merge_t0 = dt.datetime.now()
print '\nMerging traces ... '
if MULTIPROCESSING['merge trace']:
# multiprocessing turned on: one process per station
pool = mp.Pool(None)
traces = pool.map(get_merged_trace, iterate_stations)
pool.close()
pool.join()
else:
# multiprocessing turned off: processing stations one after another
traces = [get_merged_trace(s) for s in iterate_stations]
# =====================================================
# getting or attaching instrumental response
# (parallelization is difficult because of inventories)
# =====================================================
#print "traces1: ", traces
responses = []
for tr in traces:
if not tr:
responses.append(None)
continue
# responses elements can be (1) dict of PAZ if response found in
# dataless inventory, (2) None if response found in StationXML
# inventory (directly attached to trace) or (3) False if no
# response found
if RESP_REMOVE:
try:
response = Preprocess.get_or_attach_response(
trace=tr,
dataless_inventories=dataless_inventories,
xml_inventories=xml_inventories)
errmsg = None
except pserrors.CannotPreprocess as err:
# response not found
response = False
errmsg = '{}: skipping'.format(err)
except Exception as err:
# unhandled exception!
response = False
errmsg = 'Unhandled error: {}'.format(err)
responses.append(response)
if errmsg:
# printing error message
if total_verbose:
print '{}.{} [{}] '.format(tr.stats.network,
tr.stats.station,
errmsg),
else:
responses.append(None)
#print "traces2: ", traces
print '\nTraces merged and responses removed in {:.1f} seconds'\
.format((dt.datetime.now() - merge_t0).total_seconds())
# =================
# processing traces
# =================
print '\nPre-processing traces ... '
t0 = dt.datetime.now()
# must have more than one trace for cross-correlations!
#traces = np.array(traces)
#traces_check = traces[traces != np.array(None)]
#print "traces3: ", traces
if MULTIPROCESSING['process trace']:
# multiprocessing turned on: one process per station
pool = mp.Pool(NB_PROCESSES)
traces = pool.map(preprocessed_trace, zip(traces, responses))
pool.close()
pool.join()
else:
# multiprocessing turned off: processing stations one after another
try:
traces = map(preprocessed_trace, zip(traces, responses))
except:
continue
# setting up dict of current date's traces, {station: trace}
tracedict = {s.name: trace for s, trace in zip(iterate_stations,
traces) if trace}
delta = (dt.datetime.now() - t0).total_seconds()
print "\nProcessed traces in {:.1f} seconds".format(delta)
# create tmp folder for tracedict
#if not os.path.exists('tmp'): os.makedirs('tmp')
#dump the time interval's pre-processed items in tracedict to a pickle
#with open('tmp/preprocessed_tracedict.pickle', 'wb') as f:
# print "\nExporting pre-processed traces of time-series to: " + f.name
# pickle.dump(tracedict, f, protocol=2)
# import tracedict from output pickle produced with preprocess_total
#tracedict_pickle = 'tmp/preprocessed_tracedict.pickle'
#f = open(name=tracedict_pickle, mode='rb')
#tracedict = pickle.load(f)
#f.close()
# remove preprocessed tracedict pickle file
#if os.path.isfile(tracedict_pickle): os.remove(tracedict_pickle)
# ======================================================
# stacking cross-correlations of the current time-series
# ======================================================
#if len(tracedict) < 2:
# print "No cross-correlation for this interval"
# continue
t0 = dt.datetime.now()
xcorrdict = {}
if MULTIPROCESSING['cross-corr']:
# if multiprocessing is turned on, we pre-calculate cross-correlation
# arrays between pairs of stations (one process per pair) and feed
# them to xc.add() (which won't have to recalculate them)
print "\nProcessing cross-correlations ..."
def xcorr_func(pair):
"""
Preparing func that returns cross-correlation array
            between two traces
"""
(s1, tr1), (s2, tr2) = pair
print '{}-{} '.format(s1, s2),
shift = int(CROSSCORR_TMAX / PERIOD_RESAMPLE)
xcorr = obspy.signal.cross_correlation.xcorr(
tr1, tr2, shift_len=shift, full_xcorr=True)[2]
#plt.figure()
#plt.title("xcorr 1 ")
#plt.plot(xcorr)
#plt.show()
#plt.clf()
return xcorr
pairs = list(it.combinations(sorted(tracedict.items()), 2))
pool = mp.Pool(NB_PROCESSES)
xcorrs = pool.map(xcorr_func, pairs)
pool.close()
pool.join()
xcorrdict = {(s1, s2): xcorr for ((s1, _), (s2, _)),
xcorr in zip(pairs, xcorrs)}
print
print "Stacking cross-correlations"
xc.add(tracedict=tracedict,
stations=stations,
xcorr_tmax=CROSSCORR_TMAX,
xcorrdict=xcorrdict,
date=date,
verbose=not MULTIPROCESSING['cross-corr'])
pairs = list(it.combinations(sorted(tracedict.items()), 2))
# calculate max snr for snr weighted stack!
# for pair in pairs:
# (s1, tr1), (s2, tr2) = pair
# s1, s2 = str(s1), str(s2)
# snr_list = xc[s1][s2].SNR_list
# max_snr = np.max(snr_list)
# snr_stack = xc[s1][s2].SNR_stack
# snr_wstack = np.zeros_like(snr_stack[0])
# for xcorr, snr in zip(snr_stack, snr_list):
# snr_wstack += xcorr * snr / max_snr
# assign final snr weighted stack xcorr green's function to SNR_stack
# xc[s1][s2].SNR_stack = snr_wstack
# if s1 in xc.keys():
# if s2 in xc[s1].keys():
# pws = xc[s1][s2].pws
# plt.figure()
# plt.plot(pws)
# plt.show()
# plt.clf()
#==============================================================================
delta = (dt.datetime.now() - t0).total_seconds()
#print "\nCalculated and stacked {} cross-correlations in \
#{:.1f} seconds".format(len(xcorrs), delta)
loop_delta = (dt.datetime.now() - loop_time0).total_seconds()
print "\nIn total, the previous loop had a processing time of: \
{:.1f} seconds".format(loop_delta)
#there may be an overlap in metadata times. Maybe add xcorr interval to this?
#have the program restart from the beginning of each stack! not each month
#this gives more redundancy!
#(allows to restart after a crash from that date)
# save partial pickle file only if timeseries loop is large enough
#if len(metadata) >= 2:
# print "Time since last save: ", abs(date - metadata[-1] + dt.timedelta(minutes=XCORR_INTERVAL))
#if len(metadata) % 10 == 0:# (date - metadata[-1]) >= dt.timedelta(hours=1):
# with open(u'{}.part.pickle'.format(OUTFILESPATH), 'wb') as f:
# print "\nExporting cross-correlations calculated until now."
# pickle.dump(xc, f, protocol=2)
# metadata.append(date)
#elif len(metadata) == 0:
# metadata.append(date)
#also create a metadata dump file for use only if the program needs to be restarted
#use replace() to get rid of basename to create file named metadata.pickle in
#correct path
#with open(u'{}.part.pickle'.format(OUTFILESPATH), 'wb') as f:
# print "\nExporting cross-correlations calculated until now."
# pickle.dump(xc, f, protocol=2)
with open(METADATA_PATH, 'wb') as f:
print "\nExporting re-start metadata of time-series calculated until \
now."
pickle.dump(metadata, f, protocol=2)
# exporting cross-correlations
if not xc.pairs():
print "No cross-correlation calculated: nothing to export!"
else:
# exporting to binary and ascii files
print "Exporting updated cross-correlations to file: ", OUTFILESPATH
xc.export(outprefix=OUTFILESPATH+'.part', stations=stations, verbose=False)
total_delta = (dt.datetime.now() - total_time0).total_seconds()
print "Calculated every xcorr in time-series in in \
{:.1f} seconds".format(total_delta)
if not xc.pairs():
print "No cross-correlation could be calculated: nothing to export!"
else:
# exporting to binary and ascii files
xc.export(outprefix=OUTFILESPATH, stations=stations, verbose=True)
# exporting to png file
print "Exporting cross-correlations to file: {}.png".format(OUTFILESPATH)
# optimizing time-scale: max time = max distance / vmin (vmin = 2.5 km/s)
maxdist = max([xc[s1][s2].dist() for s1, s2 in xc.pairs()])
maxt = min(CROSSCORR_TMAX, maxdist / 2.5)
if PLOT_DISTANCE:
#plot distance plot of cross-correlations
xc.plot(plot_type='distance', xlim=(-maxt, maxt),
outfile=os.path.join(OUTFOLDERS, OUTFILESNAME)\
+ '.png', showplot=False)
#xc.plot(plot_type='distance', xlim=(-maxt, maxt),
# outfile=os.path.join(OUTFOLDERS, OUTFILESNAME)\
# + '.png', showplot=False, stack_type='PWS')
#xc.plot(plot_type='distance', xlim=(-maxt, maxt),
# outfile=os.path.join(OUTFOLDERS, OUTFILESNAME)\
# + '.png', showplot=False, stack_type='SNR')
# xc.plot(plot_type='distance', xlim=(-maxt, maxt),
# outfile=os.path.join(OUTFOLDERS, OUTFILESNAME)\
# + '.png', showplot=False, stack_type='combined')
if PLOT_CLASSIC:
#plot individual cross-correlations
xc.plot(plot_type='classic', xlim=(-maxt, maxt),
outfile=OUTFOLDERS, showplot=False)
#xc.plot_SNR(plot_type='individual', outfile=OUT_SNR)
# removing file containing periodical exports of cross-corrs
# only do this if the full file exists!
try:
os.remove(u'{}.part.pickle'.format(OUTFILESPATH))
except:
pass
quit()
#remove_config(config_file)
|
gpl-3.0
|
kaichogami/scikit-learn
|
sklearn/cross_decomposition/pls_.py
|
34
|
30531
|
"""
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
from distutils.version import LooseVersion
from sklearn.utils.extmath import svd_flip
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
import scipy
pinv2_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
# check_finite=False is an optimization available only in scipy >=0.12
pinv2_args = {'check_finite': False}
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
    similar to the Power method for determining the eigenvectors and
    eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
# We use slower pinv2 (same as np.linalg.pinv) for stability
# reasons
X_pinv = linalg.pinv2(X, **pinv2_args)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv2(Y, **pinv2_args) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
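# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the library code): the docstring
# of _nipals_twoblocks_inner_loop above states that, in mode "A", the inner
# loop is an alternative to svd(X'Y) and returns its first left/right singular
# vectors. A minimal numerical check of that claim; the function name, the
# data assumptions (float 2-D arrays, Y with at least two columns so the loop
# iterates to convergence) and the tolerance are illustrative only.
def _check_nipals_vs_svd(X, Y):
    X = np.asarray(X, dtype=np.float64)
    Y = np.asarray(Y, dtype=np.float64)
    u_nip, v_nip, _ = _nipals_twoblocks_inner_loop(X, Y, mode="A",
                                                   norm_y_weights=True)
    u_svd, v_svd = _svd_cross_product(X, Y)
    # singular vectors are defined only up to sign: align before comparing
    u_nip = u_nip * np.sign(np.dot(u_nip.T, u_svd))
    v_nip = v_nip * np.sign(np.dot(v_nip.T, v_svd))
    return (np.allclose(u_nip, u_svd, atol=1e-4) and
            np.allclose(v_nip, v_svd, atol=1e-4))
# ---------------------------------------------------------------------------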
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
    This class implements the generic PLS algorithm; the constructor's
    parameters allow one to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
        (i) The outer loop iterates over components.
        (ii) The inner loop estimates the weights vectors. This can be done
        with two algorithms: (a) the inner loop of the original NIPALS
        algorithm, or (b) an SVD on residuals cross-covariance matrices.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
            Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
        # copy since this will contain the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (
_center_scale_xy(X, Y, self.scale))
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# Forces sign stability of x_weights and y_weights
# Sign undeterminacy issue from svd if algorithm == "svd"
# and from platform dependent computation if algorithm == 'nipals'
x_weights, y_weights = svd_flip(x_weights, y_weights.T)
y_weights = y_weights.T
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
            # Possible memory footprint reduction may be done here: in order
            # to avoid the allocation of a data chunk for the rank-one
            # approximations matrix which is then subtracted from Xk, we
            # suggest performing a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
        # U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv2(np.dot(self.x_loadings_.T, self.x_weights_),
**pinv2_args))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv2(np.dot(self.y_loadings_.T, self.y_weights_),
**pinv2_args))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy, dtype=FLOAT_DTYPES)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
    PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
    in the case of a one-dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
Matrices::
T: x_scores_
U: y_scores_
W: x_weights_
C: y_weights_
P: x_loadings_
        Q: y_loadings_
Are computed such that::
X = T P.T + Err and Y = U Q.T + Err
T[:, k] = Xk W[:, k] for k in range(n_components)
U[:, k] = Yk C[:, k] for k in range(n_components)
x_rotations_ = W (P.T W)^(-1)
y_rotations_ = C (Q.T C)^(-1)
where Xk and Yk are residual matrices at iteration k.
`Slides explaining PLS <http://www.eigenvector.com/Docs/Wise_pls_properties.pdf>`
    For each component k, find weights u, v that optimize:
    ``max corr(Xk u, Yk v) * std(Xk u) std(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
    This implementation provides the same results as 3 PLS packages
    available in the R language (R-project):
        - "mixOmics" with function pls(X, Y, mode = "regression")
        - "plspm" with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
super(PLSRegression, self).__init__(
n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
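# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the library code): the Notes above
# state that x_rotations_ = W (P.T W)^(-1) maps the centered, scaled X block
# directly to the scores T, so transform() applied to the training data should
# reproduce x_scores_. A minimal numerical check of that property; the random
# data, seed and tolerance are illustrative assumptions only.
def _check_rotation_property():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    Y = rng.randn(20, 3)
    pls = PLSRegression(n_components=2).fit(X, Y)
    # transform() centers/scales X and applies x_rotations_ ...
    T = pls.transform(X)
    # ... which should match the training scores stored during fit()
    return np.allclose(T, pls.x_scores_, atol=1e-6)
# ---------------------------------------------------------------------------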
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
    algorithm [Tenenhaus 1998] p.204, referred to as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
Matrices::
T: x_scores_
U: y_scores_
W: x_weights_
C: y_weights_
P: x_loadings_
        Q: y_loadings_
Are computed such that::
X = T P.T + Err and Y = U Q.T + Err
T[:, k] = Xk W[:, k] for k in range(n_components)
U[:, k] = Yk C[:, k] for k in range(n_components)
x_rotations_ = W (P.T W)^(-1)
y_rotations_ = C (Q.T C)^(-1)
where Xk and Yk are residual matrices at iteration k.
`Slides explaining PLS <http://www.eigenvector.com/Docs/Wise_pls_properties.pdf>`
For each component k, find weights u, v that optimize::
        max corr(Xk u, Yk v) * std(Xk u) std(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
    current Y score. This performs a canonical symmetric version of the PLS
    regression, but one slightly different from CCA. This is mostly used
    for modeling.
    This implementation provides the same results as the "plspm" package
    available in the R language (R-project), using the function plsca(X, Y).
    Results are equal or collinear with the function
    ``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
    lies in the fact that the mixOmics implementation does not exactly
    implement the Wold algorithm, since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
super(PLSCanonical, self).__init__(
n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
    Simply performs an SVD on the cross-covariance matrix X'Y.
    There is no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
        # copy since this will contain the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (
_center_scale_xy(X, Y, self.scale))
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
        # let's use arpack to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
# Deterministic output
U, V = svd_flip(U, V)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
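# ---------------------------------------------------------------------------
# Editor's illustrative usage sketch (not part of the library code): unlike
# PLSRegression and PLSCanonical, PLSSVD has no Examples section in its
# docstring. A minimal usage example on the same toy data used above; only the
# output shapes are claimed here, not the score values themselves.
def _plssvd_usage_example():
    X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
    Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
    plssvd = PLSSVD(n_components=2).fit(X, Y)
    x_scores, y_scores = plssvd.transform(X, Y)
    return x_scores.shape, y_scores.shape   # ((4, 2), (4, 2))
# ---------------------------------------------------------------------------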
|
bsd-3-clause
|
abalkin/numpy
|
numpy/linalg/linalg.py
|
3
|
89527
|
"""Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import functools
import operator
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,
add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite,
finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
atleast_2d, intp, asanyarray, object_, matmul,
swapaxes, divide, count_nonzero, isnan, sign, argsort, sort
)
from numpy.core.multiarray import normalize_axis_index
from numpy.core.overrides import set_module
from numpy.core import overrides
from numpy.lib.twodim_base import triu, eye
from numpy.linalg import lapack_lite, _umath_linalg
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy.linalg')
fortran_int = intc
@set_module('numpy.linalg')
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
def _determine_error_states():
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
return [bufsize, invalid_call_errmask, None]
# Dealing with errors in _umath_linalg
_linalg_error_extobj = _determine_error_states()
del _determine_error_states
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def _raise_linalgerror_lstsq(err, flag):
raise LinAlgError("SVD did not converge in Linear Least Squares")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj) # make a copy
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assert_2d(*arrays):
for a in arrays:
if a.ndim != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % a.ndim)
def _assert_stacked_2d(*arrays):
for a in arrays:
if a.ndim < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
def _assert_stacked_square(*arrays):
for a in arrays:
m, n = a.shape[-2:]
if m != n:
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assert_finite(*arrays):
for a in arrays:
if not isfinite(a).all():
raise LinAlgError("Array must not contain infs or NaNs")
def _is_empty_2d(arr):
# check size first for efficiency
return arr.size == 0 and product(arr.shape[-2:]) == 0
def transpose(a):
"""
Transpose each matrix in a stack of matrices.
Unlike np.transpose, this only swaps the last two axes, rather than all of
them
Parameters
----------
a : (...,M,N) array_like
Returns
-------
aT : (...,N,M) ndarray
"""
return swapaxes(a, -1, -2)
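# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the library code): a quick
# shape-level comparison of the helper above with ndarray.transpose(); the
# function name and the array shape are illustrative assumptions only.
def _illustrate_stack_transpose():
    a = empty((5, 2, 3))                      # a stack of five 2x3 matrices
    swapped_last_two = transpose(a).shape     # (5, 3, 2): per-matrix transpose
    reversed_all = a.transpose().shape        # (3, 2, 5): all axes reversed
    return swapped_last_two, reversed_all
# ---------------------------------------------------------------------------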
# Linear equations
def _tensorsolve_dispatcher(a, b, axes=None):
return (a, b)
@array_function_dispatch(_tensorsolve_dispatcher)
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=b.ndim)``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorinv, numpy.einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def _solve_dispatcher(a, b):
return (a, b)
@array_function_dispatch(_solve_dispatcher)
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
See Also
--------
scipy.linalg.solve : Similar function in SciPy.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine ``_gesv``.
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
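Broadcasting handles a stack of systems in one call; a small extra
illustration (shapes here are chosen arbitrarily, and the diagonal shift
just keeps the random matrices well conditioned):
>>> a = np.random.rand(4, 3, 3) + 3 * np.eye(3)
>>> b = np.random.rand(4, 3)
>>> x = np.linalg.solve(a, b)
>>> x.shape
(4, 3)
>>> np.allclose(np.einsum('...ij,...j->...i', a, x), b)
True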
"""
a, _ = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
gufunc = _umath_linalg.solve1
else:
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
def _tensorinv_dispatcher(a, ind=None):
return (a,)
@array_function_dispatch(_tensorinv_dispatcher)
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def _unary_dispatcher(a):
return (a,)
@array_function_dispatch(_unary_dispatcher)
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
See Also
--------
scipy.linalg.inv : Similar function in SciPy.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5 , -0.5 ]],
[[-1.25, 0.75],
[ 0.75, -0.25]]])
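If the matrix is singular, a ``LinAlgError`` is raised (a brief
illustration of the failure mode):
>>> inv(np.zeros((2, 2)))
Traceback (most recent call last):
    ...
numpy.linalg.LinAlgError: Singular matrix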
"""
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
def _matrix_power_dispatcher(a, n):
return (a,)
@array_function_dispatch(_matrix_power_dispatcher)
def matrix_power(a, n):
"""
Raise a square matrix to the (integer) power `n`.
For positive integers `n`, the power is computed by repeated matrix
squarings and matrix multiplications. If ``n == 0``, the identity matrix
of the same shape as `a` is returned. If ``n < 0``, the inverse
is computed and then raised to the power ``abs(n)``.
.. note:: Stacks of object matrices are not currently supported.
Parameters
----------
a : (..., M, M) array_like
Matrix to be "powered".
n : int
The exponent can be any integer or long integer, positive,
negative, or zero.
Returns
-------
a**n : (..., M, M) ndarray or matrix object
The return value is the same shape and type as `a`;
if the exponent is positive or zero then the type of the
elements is the same as those of `a`. If the exponent is
negative the elements are floating-point.
Raises
------
LinAlgError
For matrices that are not square or that (for negative powers) cannot
be inverted numerically.
Examples
--------
>>> from numpy.linalg import matrix_power
>>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
>>> matrix_power(i, 3) # should = -i
array([[ 0, -1],
[ 1, 0]])
>>> matrix_power(i, 0)
array([[1, 0],
[0, 1]])
>>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
array([[ 0., 1.],
[-1., 0.]])
Somewhat more sophisticated example
>>> q = np.zeros((4, 4))
>>> q[0:2, 0:2] = -i
>>> q[2:4, 2:4] = i
>>> q # one of the three quaternion units not equal to 1
array([[ 0., -1., 0., 0.],
[ 1., 0., 0., 0.],
[ 0., 0., 0., 1.],
[ 0., 0., -1., 0.]])
>>> matrix_power(q, 2) # = -np.eye(4)
array([[-1., 0., 0., 0.],
[ 0., -1., 0., 0.],
[ 0., 0., -1., 0.],
[ 0., 0., 0., -1.]])
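The repeated-squaring strategy also makes larger integer powers cheap;
the Fibonacci Q-matrix is a convenient check (a small extra example):
>>> f = np.array([[1, 1], [1, 0]])
>>> matrix_power(f, 10)   # entries are consecutive Fibonacci numbers
array([[89, 55],
       [55, 34]])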
"""
a = asanyarray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
try:
n = operator.index(n)
except TypeError as e:
raise TypeError("exponent must be an integer") from e
# Fall back on dot for object arrays. Object arrays are not supported by
# the current implementation of matmul using einsum
if a.dtype != object:
fmatmul = matmul
elif a.ndim == 2:
fmatmul = dot
else:
raise NotImplementedError(
"matrix_power not supported for stacks of object arrays")
if n == 0:
a = empty_like(a)
a[...] = eye(a.shape[-2], dtype=a.dtype)
return a
elif n < 0:
a = inv(a)
n = abs(n)
# short-cuts.
if n == 1:
return a
elif n == 2:
return fmatmul(a, a)
elif n == 3:
return fmatmul(fmatmul(a, a), a)
# Use binary decomposition to reduce the number of matrix multiplications.
# Here, we iterate over the bits of n, from LSB to MSB, raise `a` to
# increasing powers of 2, and multiply into the result as needed.
z = result = None
while n > 0:
z = a if z is None else fmatmul(z, z)
n, bit = divmod(n, 2)
if bit:
result = z if result is None else fmatmul(result, z)
return result
# Cholesky decomposition
@array_function_dispatch(_unary_dispatcher)
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. No
checking is performed to verify whether `a` is Hermitian or not.
In addition, only the lower-triangular and diagonal elements of `a`
are used. Only `L` is actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
See Also
--------
scipy.linalg.cholesky : Similar function in SciPy.
scipy.linalg.cholesky_banded : Cholesky decompose a banded Hermitian
positive-definite matrix.
scipy.linalg.cho_factor : Cholesky decomposition of a matrix, to use in
`scipy.linalg.cho_solve`.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, -0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[1.+0.j, 0.+0.j],
[0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[1.+0.j, 0.-2.j],
[0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[1.+0.j, 0.+0.j],
[0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> np.linalg.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
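As a rough sketch of the two triangular solves described in the Notes,
using only generic solves (a dedicated triangular solver such as
``scipy.linalg.solve_triangular`` would normally be used instead):
>>> A = np.array([[4., 2.], [2., 3.]])
>>> b = np.array([2., 1.])
>>> L = np.linalg.cholesky(A)
>>> y = np.linalg.solve(L, b)           # L y = b
>>> x = np.linalg.solve(L.T.conj(), y)  # L.H x = y
>>> np.allclose(np.dot(A, x), b)
True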
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
# QR decomposition
def _qr_dispatcher(a, mode=None):
return (a,)
@array_function_dispatch(_qr_dispatcher)
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw'}, optional
If K = min(M, N), then
* 'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
* 'complete' : returns q, r with dimensions (M, M), (M, N)
* 'r' : returns r only with dimensions (K, N)
* 'raw' : returns h, tau with dimensions (N, M), (K,)
The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced', and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that the array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
See Also
--------
scipy.linalg.qr : Similar function in SciPy.
scipy.linalg.rq : Compute RQ decomposition of a matrix.
Notes
-----
This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``,
``dorgqr``, and ``zungqr``.
For more information on the qr factorization, see for example:
https://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> np.allclose(r, r2) # mode='r' returns the same r as the default mode
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = np.linalg.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(np.linalg.inv(r), p)
array([ 1.1e-16, 1.0e+00])
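The 'r' and 'raw' modes return only part of the factorization; a brief
illustration of the shapes for the 4 x 2 matrix ``A`` above (as described
under the `mode` parameter):
>>> np.linalg.qr(A, mode='r').shape
(2, 2)
>>> h, tau = np.linalg.qr(A, mode='raw')
>>> h.shape, tau.shape
((2, 4), (2,))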
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning, stacklevel=3)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated."
warnings.warn(msg, DeprecationWarning, stacklevel=3)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assert_2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, max(1, m), tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = max(1, n, int(abs(work[0])))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, max(1, m), tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, max(1, m), tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = max(1, n, int(abs(work[0])))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, max(1, m), tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
@array_function_dispatch(_unary_dispatcher)
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of real symmetric or complex Hermitian
(conjugate symmetric) arrays.
eigh : eigenvalues and eigenvectors of real symmetric or complex
Hermitian (conjugate symmetric) arrays.
scipy.linalg.eigvals : Similar function in SciPy.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the ``_geev`` LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by ``Q`` on one side and by ``Q.T`` on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.]) # random
"""
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
_assert_finite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def _eigvalsh_dispatcher(a, UPLO=None):
return (a,)
@array_function_dispatch(_eigvalsh_dispatcher)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a complex Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of real symmetric or complex Hermitian
(conjugate symmetric) arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
scipy.linalg.eigvalsh : Similar function in SciPy.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines ``_syevd``, ``_heevd``.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712]) # may vary
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[5.+2.j, 9.-2.j],
[0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eigvals()
>>> # with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[5.+0.j, 0.-2.j],
[0.+2.j, 2.+0.j]])
>>> wa = LA.eigvalsh(a)
>>> wb = LA.eigvals(b)
>>> wa; wb
array([1., 6.])
array([6.+0.j, 1.+0.j])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
@array_function_dispatch(_unary_dispatcher)
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a real symmetric or complex
Hermitian (conjugate symmetric) array.
eigvalsh : eigenvalues of a real symmetric or complex Hermitian
(conjugate symmetric) array.
scipy.linalg.eig : Similar function in SciPy that also solves the
generalized eigenvalue problem.
scipy.linalg.schur : Best choice for unitary and other non-Hermitian
normal matrices.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the ``_geev`` LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``a @ v = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``a @ v[:,i] = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent and `a` can be diagonalized by
a similarity transformation using `v`, i.e, ``inv(v) @ a @ v`` is diagonal.
For non-Hermitian normal matrices the SciPy function `scipy.linalg.schur`
is preferred because the matrix `v` is guaranteed to be unitary, which is
not the case when using `eig`. The Schur factorization produces an
upper triangular matrix rather than a diagonal matrix, but for normal
matrices only the diagonal of the upper triangular matrix is needed, the
rest is roundoff error.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``y.T @ a = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([1., 2., 3.])
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([1.+1.j, 1.-1.j])
array([[0.70710678+0.j , 0.70710678-0.j ],
[0. -0.70710678j, 0. +0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that ``a.conj().T == a``, i.e., `a` is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([2.+0.j, 0.+0.j])
array([[ 0. +0.70710678j, 0.70710678+0.j ], # may vary
[ 0.70710678+0.j , -0. +0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([1., 1.])
array([[1., 0.],
[0., 1.]])
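The defining relation ``a @ v[:,i] = w[i] * v[:,i]`` from the Notes can be
checked for all eigenpairs at once (a quick sanity check):
>>> a = np.diag((1., 2., 3.))
>>> w, v = LA.eig(a)
>>> np.allclose(a @ v, v * w)
True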
"""
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
_assert_finite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
@array_function_dispatch(_eigvalsh_dispatcher)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a complex Hermitian
(conjugate symmetric) or a real symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian or real symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of real symmetric or complex Hermitian
(conjugate symmetric) arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
scipy.linalg.eigh : Similar function in SciPy (but also solves the
generalized eigenvalue problem).
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines ``_syevd``,
``_heevd``.
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, -0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ], # may vary
[ 0. +0.38268343j, 0. -0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([5.55111512e-17+0.0000000e+00j, 0.00000000e+00+1.2490009e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, -0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ], # may vary
[ 0. +0.38268343j, 0. -0.92387953j]])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[5.+2.j, 9.-2.j],
[0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eig() with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[5.+0.j, 0.-2.j],
[0.+2.j, 2.+0.j]])
>>> wa, va = LA.eigh(a)
>>> wb, vb = LA.eig(b)
>>> wa; wb
array([1., 6.])
array([6.+0.j, 1.+0.j])
>>> va; vb
array([[-0.4472136 +0.j , -0.89442719+0.j ], # may vary
[ 0. +0.89442719j, 0. -0.4472136j ]])
array([[ 0.89442719+0.j , -0. +0.4472136j],
[-0. +0.4472136j, 0.89442719+0.j ]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def _svd_dispatcher(a, full_matrices=None, compute_uv=None, hermitian=None):
return (a,)
@array_function_dispatch(_svd_dispatcher)
def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
"""
Singular Value Decomposition.
When `a` is a 2D array, it is factorized as ``u @ np.diag(s) @ vh
= (u * s) @ vh``, where `u` and `vh` are 2D unitary arrays and `s` is a 1D
array of `a`'s singular values. When `a` is higher-dimensional, SVD is
applied in stacked mode as explained below.
Parameters
----------
a : (..., M, N) array_like
A real or complex array with ``a.ndim >= 2``.
full_matrices : bool, optional
If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and
``(..., N, N)``, respectively. Otherwise, the shapes are
``(..., M, K)`` and ``(..., K, N)``, respectively, where
``K = min(M, N)``.
compute_uv : bool, optional
Whether or not to compute `u` and `vh` in addition to `s`. True
by default.
hermitian : bool, optional
If True, `a` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
.. versionadded:: 1.17.0
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
s : (..., K) array
Vector(s) with the singular values, within each vector sorted in
descending order. The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`.
vh : { (..., N, N), (..., K, N) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
See Also
--------
scipy.linalg.svd : Similar function in SciPy.
scipy.linalg.svdvals : Compute singular values of a matrix.
Notes
-----
.. versionchanged:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine ``_gesdd``.
SVD is usually described for the factorization of a 2D matrix :math:`A`.
The higher-dimensional case will be discussed below. In the 2D case, SVD is
written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`,
:math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s`
contains the singular values of `a` and `u` and `vh` are unitary. The rows
of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are
the eigenvectors of :math:`A A^H`. In both cases the corresponding
(possibly non-zero) eigenvalues are given by ``s**2``.
If `a` has more than two dimensions, then broadcasting rules apply, as
explained in :ref:`routines.linalg-broadcasting`. This means that SVD is
working in "stacked" mode: it iterates over all indices of the first
``a.ndim - 2`` dimensions and for each combination SVD is applied to the
last two indices. The matrix `a` can be reconstructed from the
decomposition with either ``(u * s[..., None, :]) @ vh`` or
``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the
function ``np.matmul`` for python versions below 3.5.)
If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are
all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
>>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3)
Reconstruction based on full SVD, 2D case:
>>> u, s, vh = np.linalg.svd(a, full_matrices=True)
>>> u.shape, s.shape, vh.shape
((9, 9), (6,), (6, 6))
>>> np.allclose(a, np.dot(u[:, :6] * s, vh))
True
>>> smat = np.zeros((9, 6), dtype=complex)
>>> smat[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
True
Reconstruction based on reduced SVD, 2D case:
>>> u, s, vh = np.linalg.svd(a, full_matrices=False)
>>> u.shape, s.shape, vh.shape
((9, 6), (6,), (6, 6))
>>> np.allclose(a, np.dot(u * s, vh))
True
>>> smat = np.diag(s)
>>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
True
Reconstruction based on full SVD, 4D case:
>>> u, s, vh = np.linalg.svd(b, full_matrices=True)
>>> u.shape, s.shape, vh.shape
((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3))
>>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh))
True
>>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh))
True
Reconstruction based on reduced SVD, 4D case:
>>> u, s, vh = np.linalg.svd(b, full_matrices=False)
>>> u.shape, s.shape, vh.shape
((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3))
>>> np.allclose(b, np.matmul(u * s[..., None, :], vh))
True
>>> np.allclose(b, np.matmul(u, s[..., None] * vh))
True
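With ``hermitian=True`` the singular values are obtained from the
eigenvalues instead of a full SVD, which is cheaper for Hermitian input
(a short sketch):
>>> h = np.array([[2., 1.], [1., 2.]])
>>> np.linalg.svd(h, compute_uv=False, hermitian=True)
array([3., 1.])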
"""
import numpy as _nx
a, wrap = _makearray(a)
if hermitian:
# note: lapack svd returns eigenvalues with s ** 2 sorted descending,
# but eig returns s sorted ascending, so we re-order the eigenvalues
# and related arrays to have the correct order
if compute_uv:
s, u = eigh(a)
sgn = sign(s)
s = abs(s)
sidx = argsort(s)[..., ::-1]
sgn = _nx.take_along_axis(sgn, sidx, axis=-1)
s = _nx.take_along_axis(s, sidx, axis=-1)
u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1)
# singular values are unsigned, move the sign into v
vt = transpose(u * sgn[..., None, :]).conjugate()
return wrap(u), s, wrap(vt)
else:
s = eigvalsh(a)
s = s[..., ::-1]
s = abs(s)
return sort(s)[..., ::-1]
_assert_stacked_2d(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m, n = a.shape[-2:]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vh = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vh = vh.astype(result_t, copy=False)
return wrap(u), s, wrap(vh)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
def _cond_dispatcher(x, p=None):
return (x,)
@array_function_dispatch(_cond_dispatcher)
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746 # may vary
>>> min(LA.svd(a, compute_uv=False))*min(LA.svd(LA.inv(a), compute_uv=False))
0.70710678118654746 # may vary
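For ``p`` other than ``None``, ``2`` or ``-2`` the definition from the
Notes is applied directly, which can be verified for the matrix above
(a quick check):
>>> np.allclose(LA.cond(a, 1), LA.norm(a, 1) * LA.norm(LA.inv(a), 1))
True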
"""
x = asarray(x) # in case we have a matrix
if _is_empty_2d(x):
raise LinAlgError("cond is not defined on empty arrays")
if p is None or p == 2 or p == -2:
s = svd(x, compute_uv=False)
with errstate(all='ignore'):
if p == -2:
r = s[..., -1] / s[..., 0]
else:
r = s[..., 0] / s[..., -1]
else:
# Call inv(x) ignoring errors. The result array will
# contain nans in the entries where inversion failed.
_assert_stacked_2d(x)
_assert_stacked_square(x)
t, result_t = _commonType(x)
signature = 'D->D' if isComplexType(t) else 'd->d'
with errstate(all='ignore'):
invx = _umath_linalg.inv(x, signature=signature)
r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1))
r = r.astype(result_t, copy=False)
# Convert nans to infs unless the original array had nan entries
r = asarray(r)
nan_mask = isnan(r)
if nan_mask.any():
nan_mask &= ~isnan(x).any(axis=(-2, -1))
if r.ndim > 0:
r[nan_mask] = Inf
elif nan_mask:
r[()] = Inf
# Convention is to return scalars instead of 0d arrays
if r.ndim == 0:
r = r[()]
return r
def _matrix_rank_dispatcher(M, tol=None, hermitian=None):
return (M,)
@array_function_dispatch(_matrix_rank_dispatcher)
def matrix_rank(M, tol=None, hermitian=False):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of singular values of the array that are
greater than `tol`.
.. versionchanged:: 1.14
Can now operate on stacks of matrices
Parameters
----------
M : {(M,), (..., M, N)} array_like
Input vector or stack of matrices.
tol : (...) array_like, float, optional
Threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
.. versionchanged:: 1.14
Broadcasted against the stack of matrices
hermitian : bool, optional
If True, `M` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
.. versionadded:: 1.14
Returns
-------
rank : (...) array_like
Rank of M.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documentation, "Rank"
https://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
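An explicit `tol` overrides the default threshold discussed in the Notes
(the values here are purely illustrative):
>>> a = np.diag([1., 1e-10])
>>> matrix_rank(a)
2
>>> matrix_rank(a, tol=1e-8)
1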
"""
M = asarray(M)
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False, hermitian=hermitian)
if tol is None:
tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps
else:
tol = asarray(tol)[..., newaxis]
return count_nonzero(S > tol, axis=-1)
# Generalized inverse
def _pinv_dispatcher(a, rcond=None, hermitian=None):
return (a,)
@array_function_dispatch(_pinv_dispatcher)
def pinv(a, rcond=1e-15, hermitian=False):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
.. versionchanged:: 1.14
Can now operate on stacks of matrices
Parameters
----------
a : (..., M, N) array_like
Matrix or stack of matrices to be pseudo-inverted.
rcond : (...) array_like of float
Cutoff for small singular values.
Singular values less than or equal to
``rcond * largest_singular_value`` are set to zero.
Broadcasts against the stack of matrices.
hermitian : bool, optional
If True, `a` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
.. versionadded:: 1.17.0
Returns
-------
B : (..., N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
See Also
--------
scipy.linalg.pinv : Similar function in SciPy.
scipy.linalg.pinv2 : Similar function in SciPy (SVD-based).
scipy.linalg.pinvh : Compute the (Moore-Penrose) pseudo-inverse of a
Hermitian matrix.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
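Singular values at or below ``rcond`` times the largest one are dropped,
so a nearly singular matrix still gets a well-behaved pseudo-inverse
(a small sketch):
>>> a = np.diag([1., 1e-20])
>>> np.allclose(np.linalg.pinv(a), np.diag([1., 0.]))
True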
"""
a, wrap = _makearray(a)
rcond = asarray(rcond)
if _is_empty_2d(a):
m, n = a.shape[-2:]
res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
u, s, vt = svd(a, full_matrices=False, hermitian=hermitian)
# discard small singular values
cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)
large = s > cutoff
s = divide(1, s, where=large, out=s)
s[~large] = 0
res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
return wrap(res)
# Determinant
@array_function_dispatch(_unary_dispatcher)
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0
The determinant is computed via LU factorization using the LAPACK
routine ``z/dgetrf``.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529) # may vary
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
sign = sign.astype(result_t, copy=False)
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
@array_function_dispatch(_unary_dispatcher)
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way to represent the determinant, more suitable
for large matrices where underflow/overflow may occur.
scipy.linalg.det : Similar function in SciPy.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine ``z/dgetrf``.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0 # may vary
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def _lstsq_dispatcher(a, b, rcond=None):
return (a, b)
@array_function_dispatch(_lstsq_dispatcher)
def lstsq(a, b, rcond="warn"):
r"""
Return the least-squares solution to a linear matrix equation.
Computes the vector x that approximately solves the equation
``a @ x = b``. The equation may be under-, well-, or over-determined
(i.e., the number of linearly independent rows of `a` can be less than,
equal to, or greater than its number of linearly independent columns).
If `a` is square and of full rank, then `x` (but for round-off error)
is the "exact" solution of the equation. Else, `x` minimizes the
Euclidean 2-norm :math:`|| b - a x ||`.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
For the purposes of rank determination, singular values are treated
as zero if they are smaller than `rcond` times the largest singular
value of `a`.
.. versionchanged:: 1.14.0
If not set, a FutureWarning is given. The previous default
of ``-1`` will use the machine precision as `rcond` parameter,
the new default will use the machine precision times `max(M, N)`.
To silence the warning and use the new default, use ``rcond=None``,
to keep using the old behavior, use ``rcond=-1``.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(1,), (K,), (0,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
See Also
--------
scipy.linalg.lstsq : Similar function in SciPy.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y, rcond=None)[0]
>>> m, c
(1.0, -0.95) # may vary
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> _ = plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> _ = plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> _ = plt.legend()
>>> plt.show()
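The returned residuals are the squared 2-norm of the misfit, which can be
checked against a direct computation (a quick consistency check using the
``A`` and ``y`` defined above):
>>> x, res, rank, s = np.linalg.lstsq(A, y, rcond=None)
>>> np.allclose(res, np.sum((A @ x - y)**2))
True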
"""
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = b.ndim == 1
if is_1d:
b = b[:, newaxis]
_assert_2d(a, b)
m, n = a.shape[-2:]
m2, n_rhs = b.shape[-2:]
if m != m2:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
# FIXME: real_t is unused
real_t = _linalgRealType(t)
result_real_t = _realType(result_t)
# Determine default rcond value
if rcond == "warn":
# 2017-08-19, 1.14.0
warnings.warn("`rcond` parameter will change to the default of "
"machine precision times ``max(M, N)`` where M and N "
"are the input matrix dimensions.\n"
"To use the future default and silence this warning "
"we advise to pass `rcond=None`, to keep using the old, "
"explicitly pass `rcond=-1`.",
FutureWarning, stacklevel=3)
rcond = -1
if rcond is None:
rcond = finfo(t).eps * max(n, m)
if m <= n:
gufunc = _umath_linalg.lstsq_m
else:
gufunc = _umath_linalg.lstsq_n
signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid'
extobj = get_linalg_error_extobj(_raise_linalgerror_lstsq)
if n_rhs == 0:
# lapack can't handle n_rhs = 0 - so allocate the array one larger in that axis
b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype)
x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj)
if m == 0:
x[...] = 0
if n_rhs == 0:
# remove the item we added
x = x[..., :n_rhs]
resids = resids[..., :n_rhs]
# remove the axis we added
if is_1d:
x = x.squeeze(axis=-1)
# we probably should squeeze resids too, but we can't
# without breaking compatibility.
# as documented
if rank != n or m <= n:
resids = array([], result_real_t)
# coerce output arrays
s = s.astype(result_real_t, copy=False)
resids = resids.astype(result_real_t, copy=False)
x = x.astype(result_t, copy=True) # Copying lets the memory in r_parts be freed
return wrap(x), wrap(resids), rank, s
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by `numpy.linalg.norm()`.
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either `numpy.amin`, `numpy.amax` or `numpy.sum`.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
y = moveaxis(x, (row_axis, col_axis), (-2, -1))
result = op(svd(y, compute_uv=False), axis=-1)
return result
def _norm_dispatcher(x, ord=None, axis=None, keepdims=None):
return (x,)
@array_function_dispatch(_norm_dispatcher)
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord`
is None. If both `axis` and `ord` are None, the 2-norm of
``x.ravel`` will be returned.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object. The default is None.
axis : {None, int, 2-tuple of ints}, optional.
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default
is None.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
See Also
--------
scipy.linalg.norm : Similar function in SciPy.
Notes
-----
For values of ``ord < 1``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
Both the Frobenius and nuclear norm orders are only defined for
matrices and raise a ValueError when ``x.ndim != 2``.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, ..., 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4.0
>>> LA.norm(b, np.inf)
9.0
>>> LA.norm(a, -np.inf)
0.0
>>> LA.norm(b, -np.inf)
2.0
>>> LA.norm(a, 1)
20.0
>>> LA.norm(b, 1)
7.0
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6.0
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
0.0
>>> LA.norm(b, -2)
1.8570331885190563e-016 # may vary
>>> LA.norm(a, 3)
5.8480354764257312 # may vary
>>> LA.norm(a, -3)
0.0
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([ 6., 6.])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
if not issubclass(x.dtype.type, (inexact, object_)):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except Exception as e:
raise TypeError("'axis' must be None, an integer or a tuple of integers") from e
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
# None of the str-type keywords for ord ('fro', 'nuc')
# are valid for vectors
elif isinstance(ord, str):
raise ValueError(f"Invalid norm order '{ord}' for vectors")
else:
absx = abs(x)
absx **= ord
ret = add.reduce(absx, axis=axis, keepdims=keepdims)
ret **= (1 / ord)
return ret
elif len(axis) == 2:
row_axis, col_axis = axis
row_axis = normalize_axis_index(row_axis, nd)
col_axis = normalize_axis_index(col_axis, nd)
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def _multidot_dispatcher(arrays, *, out=None):
yield from arrays
yield out
@array_function_dispatch(_multidot_dispatcher)
def multi_dot(arrays, *, out=None):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a, b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
.. versionadded:: 1.19.0
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] https://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
>>> A = np.random.random((10000, 100))
>>> B = np.random.random((100, 1000))
>>> C = np.random.random((1000, 5))
>>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> _ = multi_dot([A, B, C, D])
instead of::
>>> _ = np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> _ = A.dot(B).dot(C).dot(D)
Notes
-----
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B):
return A.shape[0] * A.shape[1] * B.shape[1]
Assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1], out=out)
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assert_2d(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2], out=out)
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1, out=out)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
def _multi_dot_three(A, B, C, out=None):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
    than `_multi_dot_matrix_chain_order`.
"""
a0, a1b0 = A.shape
b1c0, c1 = C.shape
# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
cost1 = a0 * b1c0 * (a1b0 + c1)
# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C, out=out)
else:
return dot(A, dot(B, C), out=out)
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
    Return a np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
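# --- Editor's note: hypothetical check, not part of the original module ---
# For the docstring example A(10x100), B(100x5), C(5x50) we have
# p = [10, 100, 5, 50]; the recurrence above gives m[0, 2] = 7500 with the
# split at k = 1, i.e. (AB)C rather than A(BC) at 75000. The invented helper
# below exercises the private routine directly under that assumption.
def _demo_chain_order_costs():
    import numpy as np
    arrays = [np.ones((10, 100)), np.ones((100, 5)), np.ones((5, 50))]
    s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True)
    return m[0, 2] == 7500 and s[0, 2] == 1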
def _multi_dot(arrays, order, i, j, out=None):
"""Actually do the multiplication with the given order."""
if i == j:
# the initial call with non-None out should never get here
assert out is None
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j),
out=out)
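# --- Editor's note: hypothetical usage sketch, not part of the original module ---
# multi_dot treats a 1-D first argument as a row vector and a 1-D last argument
# as a column vector, then squeezes the result back: a scalar when both ends are
# 1-D, a 1-D array when only one end is. The invented helper below checks that
# shape handling through the public API only.
def _demo_multi_dot_shapes():
    import numpy as np
    v = np.ones(3)
    A = np.ones((3, 4))
    w = np.ones(4)
    assert np.ndim(np.linalg.multi_dot([v, A, w])) == 0   # both ends 1-D -> scalar
    assert np.linalg.multi_dot([v, A]).shape == (4,)      # row vector times matrix
    assert np.linalg.multi_dot([A, w]).shape == (3,)      # matrix times column vector
    return True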
|
bsd-3-clause
|
berianjames/pyBAST
|
examples/sdss.py
|
1
|
11428
|
#!/usr/bin/env python
"""
This file is part of pyBAST, Bayesian Astrometry
Copyright (C) Joshua S. Bloom. All Rights Reserved. 2012.
See the license file as part of this project on github.
usage: see the associated ipython notebook in this folder.
Code:
https://github.com/berianjames/pyBAST
"""
import os, sys, string,urllib2,urllib,copy
from math import log10, radians, pi, sin, cos, atan2, sqrt
import time, traceback, datetime
import threading
import StringIO
import numpy as np
from matplotlib.mlab import csv2rec
class sdssq(object):
"""
query object for Stripe82
"""
#dr_url="http://cas.sdss.org/astrodr7/en/tools/search/x_sql.asp"
dr_url="http://cas.sdss.org/stripe82/en/tools/search/x_sql.asp"
formats = ['csv','xml','html']
def_fmt = "csv"
sdss_coadd_platescale = 0.396127 ## arcsec/pix
    def_t0 = 5.21972623E4 # fiducial start time (days)
def _filtercomment(self,sql):
"Get rid of comments starting with --. Function from Tomas Budavari's code (sqlcl.py)"
fsql = ''
for line in sql.split('\n'):
fsql += line.split('--')[0] + ' ' + os.linesep
return fsql
def recquery(self,sql,url=dr_url,fmt=def_fmt):
"""
makes a recarray of the query results
"""
rez = self._query(sql,url=url,fmt=fmt)
#print rez.readlines()
#rez.seek(0)
tmp = rez.readline()
rez.seek(0)
if len(tmp) == 0 or tmp.find("error_message") != -1 or tmp.find("ERROR") != -1:
print "rez:"
print rez.readlines()
return np.zeros((1,)).view(np.recarray)
try:
return csv2rec(rez)
except:
print "err"
print rez.readlines()
def _query(self,sql,url=dr_url,fmt=def_fmt,verbose=False):
"Run query and return file object"
fsql = self._filtercomment(sql)
if verbose:
print fsql
params = urllib.urlencode({'cmd': fsql, 'format': fmt})
try:
return StringIO.StringIO(urllib2.urlopen(url+'?%s' % params).read())
except:
print "TRIED: " + url+'?%s' % params
print "EXCEPT: sdss.py._query()"
return StringIO.StringIO() # This is an empty filehandler
def _old_get_master_cat(self,pos=(342.19156468,-0.90203402),errdeg=0.05,rcut=22.5):
## I'm not using the full HTM here
        ramin = pos[0] - 0.5*errdeg/np.cos(np.radians(pos[1])) ; ramax = pos[0] + 0.5*errdeg/np.cos(np.radians(pos[1]))  # np.cos expects radians
decmin = pos[1] - 0.5*errdeg ; decmax = pos[1] + 0.5*errdeg
rc = "and p.r < %f" % rcut if rcut not in [None] else ""
q = """SELECT p.objid,p.ra,p.dec,p.rowc,p.rowcErr,p.colc,p.colcErr,
p.u,p.g,p.r,p.i,p.z,
p.run,p.rerun,p.camcol,p.field
FROM dbo.fGetObjFromRect(%f,%f,%f,%f) n
join PhotoObjAll p on n.objID=p.objID
WHERE
n.run in (106,206)
%s
""" % (ramin,ramax,decmin,decmax, rc)
def dist(self,lon0, lat0, lon, lat):
"""
        Calculate the great-circle (haversine) distance between two points given in decimal degrees; the result is in degrees.
"""
d_lat = radians(lat0 - lat)
d_lon = radians(lon0 - lon)
x = sin(d_lat/2.) ** 2 + \
cos(radians(lat0)) * cos(radians(lat)) *\
sin(d_lon/2.) ** 2
y = 2.0 * atan2(sqrt(x), sqrt(1.0 - x))
distance = y*180.0/pi
return distance
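    # --- Editor's note: hypothetical illustrative method, not part of the original class ---
    # dist() above implements the haversine great-circle separation between two sky
    # positions (decimal degrees in, degrees out); the catalog matching below compares
    # it against a 1.5 arcsec threshold. This invented wrapper only re-expresses the
    # result in arcseconds for readability and adds nothing new.
    def _demo_angular_separation_arcsec(self, lon0, lat0, lon, lat):
        return self.dist(lon0, lat0, lon, lat) * 3600.0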
def write_match_files(self,master,pos=(342.1913750,-0.9019444), scale=sdss_coadd_platescale,\
runids=[4198],t0=def_t0):
self.generated_match_files = []
if isinstance(runids,str):
if runids == "all":
runids = list(set(master.run))
else:
                print "I don't understand runids = %s" % repr(runids)
return
## figure out the special ID of the source we're interested in
mind = 100.0
bestid = -1
gotit = False
mpos = tuple()
for x in master:
d = self.dist(pos[0],pos[1],x["ra"],x["dec"])
if d < mind:
mind = d
bestid = x["master_objid"]
mpos = (x["master_ra"],x["master_dec"])
if mind < 1.5/(3600.0):
gotit= True
break
if gotit:
print "source of interest: mind=%.3f arcsec sourceid=%i" % (mind*3600, bestid)
print " master position = %s" % repr(mpos)
else:
print "couldn't find the source after %i sources searched" % len(master)
## make conversion table for RA,DEC
        convra = lambda ra: (ra - pos[0])*np.cos(np.radians(pos[1]))*3600.0  # np.cos expects radians
convdec = lambda dec: (dec - pos[1])*3600.0
for r in runids:
## get only the matches for this run
tmp = master[np.where(master.run == r)]
fname = "match_astrom_%.4f_%.5f_run%i.dat" % (pos[0],pos[1],r)
f = open(fname,"w")
f.write("# filename: %s \n" % fname)
f.write("# nominal center (used for x,y conversion): %f, %f\n" % (pos[0],pos[1]))
if gotit:
f.write("# source of interest\n")
f.write("# master_objid: %i\n# master_pos: %f, %f\n" % \
(bestid,mpos[0],mpos[1]))
ttt = np.where(tmp["master_objid"] == bestid)
                f.write("# fiducial master location (delta ra, delta dec): %f, %f\n" % \
(convra(mpos[0]),convdec(mpos[1])))
if len(ttt[0]) > 0:
f.write("# converted source location (delta ra, delta dec): %f, %f\n" % \
(convra(tmp[ttt]["ra"]),convdec(tmp[ttt]["dec"])))
f.write("# source location (ra, dec): %f, %f\n" % \
(tmp[ttt]["ra"],tmp[ttt]["dec"]))
f.write("# source error (raerr, decerr): %f, %f\n" % \
(tmp[ttt]["raerr"],tmp[ttt]["decerr"]))
else:
f.write("# could not find master source of interest\n")
f.write("# observation time (day): %f\n# t0 (day): %f\n" % (tmp["time"][0],t0))
f.write("master_objid,objid,master_ra,master_dec,master_dra,master_ddec")
f.write(",master_raerr,master_decerr,master_radeccov,ra,dec,dra,ddec,raerr,decerr,radeccov")
f.write(",master_rmag,rmag\n")
for x in tmp[np.where(tmp["master_objid"] != bestid)]:
f.write("%i,%i" % (x["master_objid"],x["objid"]))
f.write(",%f,%f,%f,%f" % (x["master_ra"],x["master_dec"],\
convra(x["master_ra"]),convdec(x["master_dec"])))
f.write(",%f,%f,0.0" % (x["master_raerr"],x["master_decerr"]))
f.write(",%f,%f,%f,%f" % (x["ra"],x["dec"],\
convra(x["ra"]),convdec(x["dec"])))
f.write(",%f,%f,0.0" % (x["raerr"],x["decerr"]))
f.write(",%f,%f\n" % (x["master_rmag"],x["rmag"]))
f.close()
print "wrote %i matches in file %s" % (len(tmp[np.where(tmp["master_objid"] != bestid)]),fname)
self.generated_match_files.append((fname,len(tmp[np.where(tmp["master_objid"] != bestid)]),tmp["time"][0]))
## sort the generated file list by time
self.generated_match_files.sort(key=lambda x: x[2])
def get_master_cat(self,pos=(342.1913750, -0.9019444),errdeg=0.07,rcut=22.5,t0=def_t0,\
MASTER_PRE = "master_sdss",savefile=True):
"""
        issue the monstrous nested query to get master catalog positions in Stripe82 over time
"""
master_name = MASTER_PRE + "%.4f_%.5f.npz" % pos
if os.path.exists(master_name):
npzfile = np.load(master_name)
return npzfile["master"].view(np.recarray)
        ramin = pos[0] - 0.5*errdeg/np.cos(np.radians(pos[1])) ; ramax = pos[0] + 0.5*errdeg/np.cos(np.radians(pos[1]))  # np.cos expects radians
decmin = pos[1] - 0.5*errdeg ; decmax = pos[1] + 0.5*errdeg
rc = "and p.r < %f" % rcut if rcut not in [None] else ""
### Runs 106 and 206 are the deep coadds from Stripe82. Get the master catalog
### about the input position
q = """SELECT cc.rr0objid as master_objid,cc.rr0ra as master_ra,cc.qdec as master_dec,
cc.master_decerr,cc.master_raerr,cc.r as master_rmag, cc.darcsec,p.objid,p.r as rmag,
cc.ra,cc.dec,p.rowcErr * %f as raerr,p.colcErr * %f as decerr,F.mjd_r - %f as time,p.run,p.rerun,p.camcol,p.field
from (SELECT p.r, rr0.colcErr * %f as master_decerr, rr0.rowcErr * %f as master_raerr, dbo.fdistancearcmineq(rr0.ra,rr0.dec,q.ra,q.dec)*60 as darcsec,q.ra,q.dec,
p.htmid, q.objid,rr0.objid as rr0objid, rr0.ra as rr0ra,rr0.dec as qdec from
(SELECT p.objid,p.ra,p.dec,p.rowc,p.rowcErr,p.colc,p.colcErr,
p.u,p.g,p.r,p.i,p.z,
p.run,p.rerun,p.camcol,p.field
FROM dbo.fGetObjFromRect(%f,%f,%f,%f) n
join PhotoObjAll p on n.objID=p.objID
WHERE
n.run in (106,206)
%s) as rr0, PhotoObj p,PhotoObj q
where rr0.objid = p.objid
and q.htmid between 3000*(p.htmid/3000) and 3000*(p.htmid/3000+2) and
q.objid != p.objid
and dbo.fdistancearcmineq(rr0.ra,rr0.dec,q.ra,q.dec)*60 < 2 ) as cc
join PhotoObjAll p on cc.objid=p.objID
join Field F on F.fieldID = p.fieldID
-- order by
-- time
""" % (sdss_coadd_platescale, sdss_coadd_platescale, t0, \
sdss_coadd_platescale, sdss_coadd_platescale, ramin,ramax,decmin,decmax, rc)
#print q
rez = self.recquery(q)
if savefile and not os.path.exists(master_name):
np.savez(master_name,master=rez)
return rez
def test1(self):
q = """SELECT TOP 10 objid,ra,dec,u,g,r,i,z,
run,rerun,camcol,field
FROM PhotoObj
WHERE
u BETWEEN 0 AND 19.6
AND g BETWEEN 0 AND 20
and run in (106,206)
"""
print self._query(q).readlines()
def test2(self):
q = """SELECT TOP 10 objid,ra,dec,u,g,r,i,z,
run,rerun,camcol,field
FROM PhotoObj
WHERE
u BETWEEN 0 AND 19.6
AND g BETWEEN 0 AND 20
and run in (106,206)
"""
return self.recquery(q)
def test3(self):
m = self.get_master_cat()
t = self.write_match_files(m)
"""
In [4]: b[5].ra, b[5].dec
Out[4]: (342.19783344000001, -0.89816640000000003)
In [5]: b[0].ra, b[0].dec
Out[5]: (342.19151775, -0.90201332000000001)
/usr/stsci/wcstools-3.7.3/bin.macintel/skycoor -vr 342.19783344000001 -0.89816640000000003 342.19151775 -0.90201332000000001
Distance is 26.620 arcsec
dRA = -22.73368 arcsec, dDec = -13.84891 arcsec
In [8]: ((b[5].colc - b[0].colc)**2 + (b[5].rowc - b[0].rowc)**2)**(0.5)
Out[8]: 67.227760180758793
In [9]: 26.620/67.2277601807
Out[9]: 0.3959673790774629
"""
|
mit
|
neale/CS-program
|
434-MachineLearning/final_project/linearClassifier/sklearn/metrics/tests/test_regression.py
|
272
|
6066
|
from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
    # Checking for the condition in which both numerator and denominator are
    # zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
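# --- Editor's note: hypothetical sketch, not part of the original test module ---
# The multioutput tests above hinge on one relationship: 'uniform_average' is the
# plain mean of the 'raw_values' scores, and an explicit weight vector gives their
# weighted mean. The invented helper below states that directly with public APIs.
def _demo_multioutput_averaging():
    import numpy as np
    from sklearn.metrics import r2_score
    y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
    y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
    raw = r2_score(y_true, y_pred, multioutput='raw_values')
    assert np.allclose(r2_score(y_true, y_pred, multioutput='uniform_average'),
                       np.mean(raw))
    assert np.allclose(r2_score(y_true, y_pred, multioutput=[0.4, 0.6]),
                       np.average(raw, weights=[0.4, 0.6]))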
|
unlicense
|
Kmayankkr/robocomp
|
tools/AGM/tools/AGGLEditor.py
|
2
|
31833
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ------------------------
# ----- AGGLEditor -----
# ------------------------
#
# A free/libre open-source graph grammar drawing tool.
#
# Copyright (C) 2012-2017 by Luis J. Manso
#
# AGGLEditor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AGGLEditor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AGGLEditor. If not, see <http://www.gnu.org/licenses/>.
# Ctrl+c handling
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
# Python distribution imports
import sys, traceback, os, re, threading, time, string, math
# Qt interface
import PySide
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtSvg import *
import Image, ImageOps
import numpy as np
from inspect import currentframe, getframeinfo
sys.path.append('/usr/local/share/agm')
from ui_guiAGGLEditor import Ui_MainWindow
from ui_agglTypeEditor import Ui_TypeEditor
from ui_appearance import Ui_Appearance
from parseAGGL import *
global vertexDiameter
vertexDiameter = 40
global nodeThickness
nodeThickness = 2.5
global lineThickness
lineThickness = 2.5
global longPattern
longPattern = 3
global shortPattern
shortPattern = 1
global spacePattern
spacePattern = 3
global dashPattern
dashPattern = []
global fontName
fontName = "Arial"
global fontSize
fontSize = 14
from AGMModule import *
import networkx as nx
import matplotlib
matplotlib.use('Qt4Agg')
matplotlib.rcParams['backend.qt4'] = 'PySide'
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt4agg import ( FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
from PySide import QtGui, QtCore
import random
from weakref import proxy
class Plotter(FigureCanvas):
def __init__(self, parent):
''' plot some random stuff '''
self.parent = proxy(parent)
self.fig = Figure()
super(Plotter,self).__init__(self.fig)
# create an axis
self.axes = self.fig.add_subplot(111)
def plot(self, data):
self.axes.clear()
g = nx.DiGraph()
for symbolType in data:
g.add_node(symbolType)
for d in data[symbolType]:
g.add_edge(symbolType, d, weight=1.0)
nodes = data.keys()
labels = {}
for n in nodes:
labels[n] = n
from networkx.drawing.nx_agraph import graphviz_layout
# nx.draw(g, ax=self.axes, node_size=311, node_color='w', font_weight='bold', linewidths=0, font_size=18, nodelist=nodes, labels=labels)
nx.draw(g, pos=graphviz_layout(g, prog='dot'), ax=self.axes, node_size=311, node_color='w', font_weight='bold', linewidths=0, font_size=18, nodelist=nodes, labels=labels)
self.fig.canvas.draw()
def binding_plotter_with_ui(self):
self.parent.verticalLayout.insertWidget(2, self)
class TypeEditor(QWidget, Ui_TypeEditor):
def __init__(self):
QWidget.__init__(self)
self.setupUi(self)
self.plotter = Plotter(self)
self.plotter.binding_plotter_with_ui()
def plot(self, data):
self.plotter.plot(data)
class AGMEditor(QMainWindow):
def __init__(self, filePath=''):
QMainWindow.__init__(self)
self.filePath = filePath
self.modified = False
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
# Type Hierarchy
self.typeHierarchyWidget = TypeEditor()
cursorSize = 25
self.cursor = {}
self.cursor['add node'] = QCursor(QPixmap('/usr/local/share/agm/icons/nodeadd.png').scaled(cursorSize,cursorSize))
self.cursor['remove node'] = QCursor(QPixmap('/usr/local/share/agm/icons/noderemove.png').scaled(cursorSize,cursorSize))
self.cursor['rename node'] = QCursor(QPixmap('/usr/local/share/agm/icons/noderename.png').scaled(cursorSize,cursorSize))
self.cursor['change type'] = QCursor(QPixmap('/usr/local/share/agm/icons/noderetype.png').scaled(cursorSize,cursorSize))
self.cursor['move node'] = QCursor(QPixmap('/usr/local/share/agm/icons/nodemove.png').scaled(cursorSize,cursorSize))
self.cursor['add edge'] = QCursor(QPixmap('/usr/local/share/agm/icons/linkadd.png').scaled(cursorSize,cursorSize))
self.cursor['remove edge'] = QCursor(QPixmap('/usr/local/share/agm/icons/linkremove.png').scaled(cursorSize,cursorSize))
self.cursor['change label'] = QCursor(QPixmap('/usr/local/share/agm/icons/linktype.png').scaled(cursorSize,cursorSize))
self.cursor['negate edge'] = QCursor(QPixmap('/usr/local/share/agm/icons/linknegate.png').scaled(cursorSize,cursorSize))
# Graph painters
self.lhsPainter = GraphDraw(self.ui.lhsParentWidget, self, "LHS")
self.rhsPainter = GraphDraw(self.ui.rhsParentWidget, self, "RHS")
self.timer = QTimer()
self.tool = ''
self.statusBar().hide()
self.connect(self.timer, SIGNAL('timeout()'), self.draw)
# self.connect(self.ui.toolsList, SIGNAL('currentRowChanged(int)'), self.selectTool)
self.connect(self.ui.rulesList, SIGNAL('currentRowChanged(int)'), self.changeRule)
self.connect(self.ui.actionChangeAppearance, SIGNAL("triggered(bool)"), self.changeAppearance)
self.connect(self.ui.actionTypeHierarchy, SIGNAL("triggered(bool)"), self.showTypeHierarchy)
self.connect(self.ui.actionAddRule, SIGNAL("triggered(bool)"), self.addRule)
self.connect(self.ui.actionRemoveCurrentRule, SIGNAL("triggered(bool)"), self.removeCurrentRule)
self.connect(self.ui.actionRenameCurrentRule, SIGNAL("triggered(bool)"), self.renameCurrentRule)
self.connect(self.ui.actionExport, SIGNAL("triggered(bool)"), self.exportRule)
self.connect(self.ui.actionGenerateCode, SIGNAL("triggered(bool)"), self.generateCode)
self.connect(self.ui.actionGenerateAGGLPlanner, SIGNAL("triggered(bool)"), self.generateAGGLPlannerCode)
self.connect(self.ui.actionExportAllRules, SIGNAL("triggered(bool)"), self.exportAll)
self.connect(self.ui.actionExportAllRulesPNG, SIGNAL("triggered(bool)"), self.exportAllPNG)
self.connect(self.ui.actionSaveAs, SIGNAL("triggered(bool)"), self.saveAs)
self.connect(self.ui.actionSave, SIGNAL("triggered(bool)"), self.save)
self.connect(self.ui.actionOpen, SIGNAL("triggered(bool)"), self.open)
self.connect(self.ui.actionQuit, SIGNAL("triggered(bool)"), self.appClose)
self.connect(self.ui.actionGraphmar, SIGNAL("triggered(bool)"), self.about)
self.connect(self.ui.actionHideToolNames, SIGNAL("triggered(bool)"), self.hideToolNames)
self.connect(self.ui.passiveCheckBox, SIGNAL("stateChanged(int)"), self.changePassive)
self.connect(self.ui.hierarchicalCheckBox, SIGNAL("stateChanged(int)"), self.changeHierarchical)
self.connect(self.ui.cost, SIGNAL("valueChanged(int)"), self.changeCost)
self.ui.actionSaveAs.setShortcut(QKeySequence( Qt.CTRL + Qt.Key_S + Qt.Key_Shift))
self.ui.actionSave.setShortcut(QKeySequence( Qt.CTRL + Qt.Key_S))
self.ui.actionOpen.setShortcut(QKeySequence( Qt.CTRL + Qt.Key_O))
self.ui.actionQuit.setShortcut(QKeySequence(Qt.CTRL + Qt.Key_Q))
#self.ui.actionNextRule.setShortcut(QKeySequence(Qt.Key_PageDown))
#self.ui.actionPrevRule.setShortcut(QKeySequence(Qt.Key_PageUp))
#self.ui.splitter.setStyleSheet("QSplitter::handle { background-color: gray }")
self.connect(self.ui.textParameters, SIGNAL("textChanged()"), self.textParametersChanged)
self.connect(self.ui.textPrecondition, SIGNAL("textChanged()"), self.textPreconditionChanged)
self.connect(self.ui.textEffect, SIGNAL("textChanged()"), self.textEffectChanged)
# self.connect(self.ui.comboRuleTextEdit, SIGNAL("textChanged()"), self.comboTextChanged)
self.shortcutDown = QShortcut(QKeySequence("PgDown"), self)
self.shortcutUp = QShortcut(QKeySequence("PgUp" ), self)
self.connect(self.shortcutDown, SIGNAL("activated()"), self.pgDown)
self.connect(self.shortcutUp, SIGNAL("activated()"), self.pgUp)
self.connect(self.ui.addnodebutton, SIGNAL('toggled(bool)'), self.on_addnodebutton)
self.connect(self.ui.removenodebutton, SIGNAL('toggled(bool)'), self.on_removenodebutton)
self.connect(self.ui.renamenodebutton, SIGNAL('toggled(bool)'), self.on_renamenodebutton)
self.connect(self.ui.changenodetypebutton, SIGNAL('toggled(bool)'), self.on_changenodetypebutton)
self.connect(self.ui.nodemovebutton, SIGNAL('toggled(bool)'), self.on_nodemovebutton)
self.connect(self.ui.addedgebutton, SIGNAL('toggled(bool)'), self.on_addedgebutton)
self.connect(self.ui.removeedgebutton, SIGNAL('toggled(bool)'), self.on_removeedgebutton)
self.connect(self.ui.changeedgelabelbutton, SIGNAL('toggled(bool)'), self.on_changeedgelabelbutton)
self.connect(self.ui.negateedgebutton, SIGNAL('toggled(bool)'), self.on_negateedgebutton)
self.connect(self.typeHierarchyWidget.typeList, SIGNAL("currentRowChanged(int)"), self.currentTypeChanged)
# self.connect(self.typeHierarchyWidget.typeList, SIGNAL("currentItemChanged(QListWidgetItem)"), self.currentTypeChanged)
# self.connect(self.typeHierarchyWidget.typeList, SIGNAL("itemPressed(QWidgetItem)"), self.currentTypeChanged)
self.connect(self.typeHierarchyWidget.newButton, SIGNAL("clicked()"), self.createType)
self.connect(self.typeHierarchyWidget.renameButton, SIGNAL("clicked()"), self.renameType)
self.connect(self.typeHierarchyWidget.includeButton, SIGNAL("clicked()"), self.includeTypeInheritance)
self.connect(self.typeHierarchyWidget.removeButton, SIGNAL("clicked()"), self.removeTypeInheritance)
self.connect(self.typeHierarchyWidget.okButton, SIGNAL("clicked()"), self.typeHierarchyWidget.hide)
self.hideToolNames(False)
# Get settings
settings = QSettings("AGM", "mainWindowGeometry")
value = settings.value("geometry")
if value != None:
self.restoreGeometry(value)
self.timer.start(150)
# self.ui.toolsList.setCurrentRow(4)
# self.selectTool(4)
# self.ui.toolsList.setCurrentRow(4)
# self.selectTool(4)
global vertexDiameter
global fontName
global fontSize
self.agmData = AGMFileData()
if len(filePath)>0:
self.openFromFile(filePath)
try:
vertexDiameter = self.agmData.properties['vertexDiameter']
except:
pass
try:
global nodeThickness
nodeThickness = self.agmData.properties['nodeThickness']
except:
pass
try:
global lineThickness
lineThickness = self.agmData.properties['lineThickness']
except:
pass
try:
global dashPattern
dashPattern = []
except:
pass
try:
fontName = self.agmData.properties['fontName']
except:
pass
try:
fontSize = self.agmData.properties['fontSize']
except:
pass
else:
self.addRule()
frameinfo = getframeinfo(currentframe())
frameinfo = getframeinfo(currentframe())
self.ui.rulesList.setCurrentRow(0)
self.ui.rulesList.setFocus(Qt.OtherFocusReason)
# Node appearance
self.appearance = Appearance()
self.appearance.ui.radius.setValue(vertexDiameter)
# Font
font = QFont(fontName, fontSize, weight=0, italic=False)
font.setStyle(QFont.StyleNormal)
self.fontDialog = QFontDialog(font, self)
self.fontDialog.setCurrentFont(font)
self.font = self.fontDialog.currentFont()
self.connect(self.ui.actionChangeFont, SIGNAL("triggered(bool)"), self.changeFont)
# Sizes
self.show()
sh = self.ui.centralwidget.height()
self.ui.splitter.setSizes([int(0.65*sh), int(0.35*sh)])
self.tool = 'move node'
self.uncheckOtherTools(self.tool)
def pgDown(self):
r = self.ui.rulesList.currentRow()+1
if r>=0 and r<self.ui.rulesList.count():
self.ui.rulesList.setCurrentRow(r)
def pgUp(self):
r = self.ui.rulesList.currentRow()-1
if r>=0 and r<self.ui.rulesList.count():
self.ui.rulesList.setCurrentRow(r)
def on_addnodebutton(self, v):
if v:
self.tool = 'add node'
self.uncheckOtherTools(self.tool)
def on_removenodebutton(self, v):
if v:
self.tool = 'remove node'
self.uncheckOtherTools(self.tool)
def on_renamenodebutton(self, v):
if v:
self.tool = 'rename node'
self.uncheckOtherTools(self.tool)
def on_changenodetypebutton(self, v):
if v:
self.tool = 'change type'
self.uncheckOtherTools(self.tool)
def on_nodemovebutton(self, v):
if v:
self.tool = 'move node'
self.uncheckOtherTools(self.tool)
def on_addedgebutton(self, v):
if v:
self.tool = 'add edge'
self.uncheckOtherTools(self.tool)
def on_removeedgebutton(self, v):
if v:
self.tool = 'remove edge'
self.uncheckOtherTools(self.tool)
def on_changeedgelabelbutton(self, v):
if v:
self.tool = 'change label'
self.uncheckOtherTools(self.tool)
def on_negateedgebutton(self, v):
if v:
self.tool = 'negate edge'
self.uncheckOtherTools(self.tool)
def uncheckOtherTools(self, tool):
if tool != 'add node':
self.ui.addnodebutton.setChecked(False)
if tool != 'remove node':
self.ui.removenodebutton.setChecked(False)
if tool != 'rename node':
self.ui.renamenodebutton.setChecked(False)
if tool != 'change type':
self.ui.changenodetypebutton.setChecked(False)
if tool != 'move node':
self.ui.nodemovebutton.setChecked(False)
if tool != 'add edge':
self.ui.addedgebutton.setChecked(False)
if tool != 'remove edge':
self.ui.removeedgebutton.setChecked(False)
if tool != 'change label':
self.ui.changeedgelabelbutton.setChecked(False)
if tool != 'negate edge':
self.ui.negateedgebutton.setChecked(False)
self.lhsPainter.setCursor(self.cursor[tool])
self.rhsPainter.setCursor(self.cursor[tool])
def hideToolNames(self, value):
self.ui.addnodebutton.setIcon(QIcon('/usr/local/share/agm/icons/nodeadd.png'))
self.ui.removenodebutton.setIcon(QIcon('/usr/local/share/agm/icons/noderemove.png'))
self.ui.renamenodebutton.setIcon(QIcon('/usr/local/share/agm/icons/noderename.png'))
self.ui.changenodetypebutton.setIcon(QIcon('/usr/local/share/agm/icons/noderetype.png'))
self.ui.nodemovebutton.setIcon(QIcon('/usr/local/share/agm/icons/nodemove.png'))
self.ui.addedgebutton.setIcon(QIcon('/usr/local/share/agm/icons/linkadd.png'))
self.ui.removeedgebutton.setIcon(QIcon('/usr/local/share/agm/icons/linkremove.png'))
self.ui.changeedgelabelbutton.setIcon(QIcon('/usr/local/share/agm/icons/linktype.png'))
self.ui.negateedgebutton.setIcon(QIcon('/usr/local/share/agm/icons/linknegate.png'))
if value:
self.ui.addnodebutton.setText('')
self.ui.removenodebutton.setText('')
self.ui.renamenodebutton.setText('')
self.ui.changenodetypebutton.setText('')
self.ui.nodemovebutton.setText('')
self.ui.addedgebutton.setText('')
self.ui.removeedgebutton.setText('')
self.ui.changeedgelabelbutton.setText('')
self.ui.negateedgebutton.setText('')
else:
self.ui.addnodebutton.setText('add symbol')
self.ui.removenodebutton.setText('remove symbol')
self.ui.renamenodebutton.setText('rename symbol')
self.ui.changenodetypebutton.setText('change symbol type')
self.ui.nodemovebutton.setText('move symbol')
self.ui.addedgebutton.setText('add link')
self.ui.removeedgebutton.setText('remove link')
self.ui.changeedgelabelbutton.setText('change link label')
self.ui.negateedgebutton.setText('negate link')
# Manages close events
def appClose(self):
if self.modified:
self.close()
else:
self.close()
self.close()
def closeEvent(self, closeevent):
settings = QSettings("AGM", "mainWindowGeometry")
g = self.saveGeometry()
settings.setValue("geometry", g)
def about(self):
QMessageBox.information(self, "About", "Active Graph Grammar Language Editor (AGGLEditor):\n https://github.com/ljmanso/AGM/wiki \n\n Icons by Alexander Madyankin and Roman Shamin\n(MIT license)")
def draw(self):
self.lhsPainter.graph.setColors(self.rhsPainter.graph, True)
self.rhsPainter.graph.setColors(self.lhsPainter.graph, False)
self.lhsPainter.update()
self.rhsPainter.update()
for r in range(len(self.agmData.agm.rules)):
            if type(self.agmData.agm.rules[r]) == AGMRule:
item = self.ui.rulesList.item(r)
item.setText(self.agmData.agm.rules[r].name)
def changePassive(self, passive):
p = True
if passive == Qt.Unchecked:
p = False
self.agmData.agm.rules[self.ui.rulesList.currentRow()].passive = p
def changeHierarchical(self, hierarchical):
h = True
if hierarchical == Qt.Unchecked:
h = False
self.agmData.agm.rules[self.ui.rulesList.currentRow()].hierarchical = h
def changeCost(self, v):
self.agmData.agm.rules[self.ui.rulesList.currentRow()].cost = v
def changeFont(self):
self.fontDialog.show()
def changeAppearance(self):
self.appearance.show()
def showTypeHierarchy(self):
self.typeHierarchyWidget.show()
def addRule(self):
ddd = 'rule' + str(len(self.agmData.agm.rules))
self.ui.rulesList.addItem(ddd)
l = AGMGraph(side='L')
r = AGMGraph(side='R')
self.agmData.agm.addRule(AGMRule(ddd, l, r, False))
def removeCurrentRule(self):
pos = self.ui.rulesList.currentRow()
self.agmData.agm.rules = self.agmData.agm.rules[:pos] + self.agmData.agm.rules[pos+1:]
self.ui.rulesList.takeItem(pos)
def renameCurrentRule(self):
pos = self.ui.rulesList.currentRow()
r = RuleRenamer(self.width()/2, self.height()/2, self.agmData.agm.rules[pos], self)
r.setFocus(Qt.OtherFocusReason)
def changeRule(self, ruleN):
if type(self.agmData.agm.rules[ruleN]) in [AGMRule, AGMHierarchicalRule]:
self.ui.label.show()
self.ui.label_2.show()
self.ui.spacee.show()
self.ui.lhsParentWidget.show()
self.ui.rhsParentWidget.show()
self.ui.cost.show()
self.ui.label_3.show()
self.ui.passiveCheckBox.show()
self.ui.hierarchicalCheckBox.show()
# self.ui.comboRuleTextEdit.hide()
# self.ui.comboRuleLabel.hide()
# self.ui.label_5.show()
# self.ui.toolsList.show()
self.lhsPainter.graph = self.agmData.agm.rules[ruleN].lhs
self.rhsPainter.graph = self.agmData.agm.rules[ruleN].rhs
try:
self.ui.textParameters.setText(self.agmData.agm.rules[ruleN].parameters.replace("\n\t\t", "\n").lstrip())
except:
print traceback.format_exc()
try:
self.ui.textPrecondition.setText(self.agmData.agm.rules[ruleN].precondition.replace("\n\t\t", "\n").lstrip())
except:
print traceback.format_exc()
try:
self.ui.textEffect.setText(self.agmData.agm.rules[ruleN].effect.replace("\n\t\t", "\n").lstrip())
except:
print traceback.format_exc()
else:
self.ui.label.hide()
self.ui.label_2.hide()
self.ui.spacee.hide()
self.ui.lhsParentWidget.hide()
self.ui.rhsParentWidget.hide()
self.ui.cost.hide()
self.ui.label_3.hide()
self.ui.passiveCheckBox.hide()
self.ui.hierarchicalCheckBox.hide()
# self.ui.comboRuleTextEdit.show()
# self.ui.comboRuleLabel.show()
# self.ui.label_5.hide()
# self.ui.toolsList.hide()
#self.ui.tabWidget.setTabEnabled(1, False)
# self.ui.comboRuleTextEdit.setPlainText(self.agmData.agm.rules[ruleN].text)
self.disconnect(self.ui.passiveCheckBox, SIGNAL("stateChanged(int)"), self.changePassive)
self.disconnect(self.ui.hierarchicalCheckBox, SIGNAL("stateChanged(int)"), self.changeHierarchical)
self.disconnect(self.ui.cost, SIGNAL("valueChanged(int)"), self.changeCost)
if self.agmData.agm.rules[ruleN].passive:
self.ui.passiveCheckBox.setChecked(True)
else:
self.ui.passiveCheckBox.setChecked(False)
if isinstance(self.agmData.agm.rules[ruleN], AGMHierarchicalRule):
self.ui.hierarchicalCheckBox.setChecked(True)
else:
self.ui.hierarchicalCheckBox.setChecked(False)
self.ui.cost.setValue(int(self.agmData.agm.rules[ruleN].cost))
self.connect(self.ui.passiveCheckBox, SIGNAL("stateChanged(int)"), self.changePassive)
self.connect(self.ui.hierarchicalCheckBox, SIGNAL("stateChanged(int)"), self.changeHierarchical)
self.connect(self.ui.cost, SIGNAL("valueChanged(int)"), self.changeCost)
currRule = self.ui.rulesList.currentItem().text()
self.setWindowTitle('AGGLEditor - ['+currRule+']')
def generateCode(self):
path_pddl = QFileDialog.getSaveFileName(self, "Save FULL grammar (model verification) PDDL file as", "", "*.pddl")[0]
if not path_pddl.endswith(".pddl"): path_pddl += ".pddl"
self.agmData.generatePDDL(path_pddl, False)
path_pddl = QFileDialog.getSaveFileName(self, "Save partial grammar (active rules) PDDL file as", "", "*.pddl")[0]
if not path_pddl.endswith(".pddl"): path_pddl += ".pddl"
self.agmData.generatePDDL(path_pddl, True)
def generateAGGLPlannerCode(self):
path_apy = QFileDialog.getSaveFileName(self, "Save FULL grammar (model verification) AGGLPlanner file as", "", "*.aggl.py")[0]
if not path_apy.endswith(".aggl.py"): path_apy += ".aggl.py"
self.agmData.generateAGGLPlannerCode(path_apy, False)
path_apy = QFileDialog.getSaveFileName(self, "Save partial grammar (active rules) AGGLPlanner file as", "", "*.aggl.py")[0]
if not path_apy.endswith(".aggl.py"): path_apy += ".aggl.py"
self.agmData.generateAGGLPlannerCode(path_apy, True)
def exportRule(self):
path = str(QFileDialog.getSaveFileName(self, "Export rule", "", "*"))
if path[-4:] == '.svg': path = path[:-4]
pathLHS = path + 'LHS.png'
self.lhsPainter.export(pathLHS)
pathRHS = path + 'RHS.png'
self.rhsPainter.export(pathRHS)
def exportAll(self):
path = str(QFileDialog.getExistingDirectory(self, "Export all rules", ""))
self.exportAllPath(path)
def exportAllPath(self, path):
lhs = self.lhsPainter.graph
rhs = self.rhsPainter.graph
for i in range(len(self.agmData.agm.rules)):
rule = self.agmData.agm.rules[i]
self.lhsPainter.graph = rule.lhs
self.lhsPainter.export(str(path)+'/rule'+str(i)+'_lhs.svg')
self.rhsPainter.graph = rule.rhs
self.rhsPainter.export(str(path)+'/rule'+str(i)+'_rhs.svg')
self.lhsPainter.graph = lhs
self.rhsPainter.graph = rhs
def exportAllPNG(self):
path = str(QFileDialog.getExistingDirectory(self, "Export all rules", ""))
self.exportAllPathPNG(path)
def exportAllPathPNG(self, path):
lhs = self.lhsPainter.graph
rhs = self.rhsPainter.graph
for i in range(len(self.agmData.agm.rules)):
rule = self.agmData.agm.rules[i]
# Export LHS
self.lhsPainter.graph = rule.lhs
lhsPathPNG = str(path)+'/rule'+str(i)+'_lhs.png'
self.lhsPainter.exportPNG(lhsPathPNG)
# Export RHS
self.rhsPainter.graph = rule.rhs
rhsPathPNG = str(path)+'/rule'+str(i)+'_rhs.png'
self.rhsPainter.exportPNG(rhsPathPNG)
# Crop images similarly
#crop = False
crop = True
if crop:
#L
lImage=Image.open(lhsPathPNG)
lImage.load()
lImageBox = self.getBoxImage(lImage)
wL,hL = lImage.size
#R
rImage=Image.open(rhsPathPNG)
rImage.load()
rImageBox = self.getBoxImage(rImage)
wR,hR = rImage.size
# ----
if lImageBox == None:
lImageBox = rImageBox
if rImageBox == None:
rImageBox = lImageBox
w = wL
if w == None: w = wR
h = hL
if h == None: h = hR
imageBox = self.getProperBox(lImageBox, rImageBox, w, h)
lCropped=lImage.crop(imageBox)
lCropped.save(lhsPathPNG)
rCropped=rImage.crop(imageBox)
rCropped.save(rhsPathPNG)
self.lhsPainter.graph = lhs
self.rhsPainter.graph = rhs
def getBoxImage(self, image):
invert_im = ImageOps.invert(image)
return invert_im.getbbox()
def getProperBox(self, a, b, w, h):
c = h/2
y1 = min(a[1], b[1])
y2 = max(a[3], b[3])
ret = (0, y1, w, y2)
return ret
def open(self):
path = str(QFileDialog.getOpenFileName(self, "Export rule", "", "*.aggl")[0])
self.openFromFile(path)
def currentTypeChanged(self):
print 'a'
try:
if self.typeHierarchyWidget.previousSelected != self.typeHierarchyWidget.typeList.selectedItems():
self.reloadTypes()
self.typeHierarchyWidget.previousSelected = self.typeHierarchyWidget.typeList.selectedItems()
except:
self.reloadTypes()
self.typeHierarchyWidget.previousSelected = self.typeHierarchyWidget.typeList.selectedItems()
def reloadTypes(self):
try:
selected = self.typeHierarchyWidget.typeList.currentItem().text()
except:
selected = None
# list
prevlist = sorted([item.text().encode('latin1') for item in self.typeHierarchyWidget.typeList.findItems('', QtCore.Qt.MatchContains)])
currlist = sorted(self.agmData.agm.types.keys())
# print 'P', prevlist
# print 'C', currlist
if prevlist != currlist:
self.typeHierarchyWidget.typeList.clear()
for tp in sorted(self.agmData.agm.types.keys()):
self.typeHierarchyWidget.typeList.addItem(tp)
if selected:
# available
print 'in parents'
prevlist = sorted([item.text().encode('latin1') for item in self.typeHierarchyWidget.availableList.findItems('', QtCore.Qt.MatchContains)])
currlist = sorted(self.agmData.getPossibleParentsFor(selected))
if prevlist != currlist:
self.typeHierarchyWidget.availableList.clear()
for x in sorted(self.agmData.getPossibleParentsFor(selected)):
self.typeHierarchyWidget.availableList.addItem(x)
# selected
prevlist = sorted([item.text().encode('latin1') for item in self.typeHierarchyWidget.selectedList.findItems('', QtCore.Qt.MatchContains)])
currlist = sorted(self.agmData.getTypesDirect(selected))
if prevlist != currlist:
self.typeHierarchyWidget.selectedList.clear()
for x in sorted(self.agmData.getTypesDirect(selected)):
self.typeHierarchyWidget.selectedList.addItem(x)
else:
self.typeHierarchyWidget.availableList.clear()
self.typeHierarchyWidget.selectedList.clear()
self.typeHierarchyWidget.plot(self.agmData.agm.typesDirect)
def createType(self):
ty = 'newType createType'
self.agmData.addType(ty)
self.reloadTypes()
row = 0
while row < self.typeHierarchyWidget.typeList.count():
if self.typeHierarchyWidget.typeList.item(row).text() == ty:
break
else:
row += 1
self.typeHierarchyWidget.typeList.setCurrentRow(row)
def renameType(self):
try:
if len(self.typeHierarchyWidget.typeList.selectedItems()) == 0:
ret = QMessageBox.warning(self.typeHierarchyWidget, self.tr("AGGLEditor warning"), self.tr("Please, select a type to modify"))
return
try:
self.typeHierarchyWidget.edit.show()
except:
self.typeHierarchyWidget.edit = QLineEdit(self.typeHierarchyWidget.renameButton)
self.connect(self.typeHierarchyWidget.edit, SIGNAL("returnPressed()"), self.renameTypeDone)
self.typeHierarchyWidget.edit.show()
finally:
self.typeHierarchyWidget.edit.setFocus()
finally:
pass
def renameTypeDone(self):
text = self.typeHierarchyWidget.edit.text()
self.typeHierarchyWidget.edit.hide()
items = self.typeHierarchyWidget.typeList.selectedItems()
print items, len(items)
if text in self.agmData.agm.types.keys():
ret = QMessageBox.warning(self.typeHierarchyWidget, self.tr("AGGLEditor warning"), self.tr("The type "+text+" already exists"))
return
if len(items) != 0:
for i in self.typeHierarchyWidget.typeList.findItems(items[0].text(), 0):
self.agmData.agm.renameType(items[0].text(), text)
i.setText(text)
self.reloadTypes()
def includeTypeInheritance(self):
try:
selectedType = self.typeHierarchyWidget.typeList.selectedItems()[0].text()
except:
ret = QMessageBox.warning(self.typeHierarchyWidget, self.tr("AGGLEditor Warning"), self.tr("No type to modify was selected"))
return
try:
selectedParent = self.typeHierarchyWidget.availableList.selectedItems()[0].text()
except:
ret = QMessageBox.warning(self.typeHierarchyWidget, self.tr("AGGLEditor Warning"), self.tr("No type to include as parent to "+selectedType+" was selected"))
return
print 'include', selectedType, selectedParent
self.agmData.agm.includeTypeInheritance(selectedType, selectedParent)
self.reloadTypes()
def removeTypeInheritance(self):
try:
selectedType = self.typeHierarchyWidget.typeList.selectedItems()[0].text()
except:
traceback.print_exc()
ret = QMessageBox.warning(self.typeHierarchyWidget, self.tr("AGGLEditor Warning"), self.tr("No type to modify was selected"))
return
try:
selectedParent = self.typeHierarchyWidget.selectedList.selectedItems()[0].text()
except:
traceback.print_exc()
ret = QMessageBox.warning(self.typeHierarchyWidget, self.tr("AGGLEditor Warning"), self.tr("No type to remove from the parent list of "+selectedType+" was selected"))
return
print 'remove', selectedType, selectedParent
self.agmData.agm.removeTypeInheritance(selectedType, selectedParent)
self.reloadTypes()
def openFromFile(self, path):
if path[-5:] != '.aggl': path = path + '.aggl'
self.agmData = AGMFileDataParsing.fromFile(path, verbose=False, includeIncludes=False)
self.ui.rulesList.clear()
for rule in self.agmData.agm.rules:
q = QListWidgetItem()
q.setText(rule.name)
self.ui.rulesList.addItem(q)
if type(rule) == AGMRule:
pass
self.reloadTypes()
def saveAs(self):
path = QFileDialog.getSaveFileName(self, "Save as", "", "*.aggl")[0]
self.save(False, path)
def save(self, triggered=False, path=''):
if path == '':
if self.filePath != '':
path = self.filePath
else:
path = QFileDialog.getSaveFileName(self, "Save as", "", "*.aggl")[0]
self.filePath = path
self.agmData.properties['name'] = path.split('/')[-1].split('.')[0]
global vertexDiameter
self.agmData.properties['vertexDiameter'] = vertexDiameter
global nodeThickness
self.agmData.properties['nodeThickness'] = nodeThickness
global lineThickness
self.agmData.properties['lineThickness'] = lineThickness
global longPattern
self.agmData.properties['longPattern'] = longPattern
global shortPattern
self.agmData.properties['shortPattern'] = shortPattern
global spacePattern
self.agmData.properties['spacePattern'] = spacePattern
global fontName
self.agmData.properties['fontName'] = fontName
global fontSize
self.agmData.properties['fontSize'] = fontSize
self.agmData.toFile(path)
self.modified = False
def textParametersChanged(self):
self.agmData.agm.rules[self.ui.rulesList.currentRow()].parameters = str(self.ui.textParameters.toPlainText().encode( 'latin1')).strip().replace("\n", "\n\t\t")
def textPreconditionChanged(self):
self.agmData.agm.rules[self.ui.rulesList.currentRow()].precondition = str(self.ui.textPrecondition.toPlainText().encode('latin1')).strip().replace("\n", "\n\t\t")
def textEffectChanged(self):
self.agmData.agm.rules[self.ui.rulesList.currentRow()].effect = str(self.ui.textEffect.toPlainText().encode( 'latin1')).strip().replace("\n", "\n\t\t")
if __name__ == '__main__':
app = QApplication(sys.argv)
if len(sys.argv)>1:
if len(sys.argv)>5:
inputFile = sys.argv[1]
compileFlag1 = sys.argv[2]
outputFile1 = sys.argv[3]
compileFlag2 = sys.argv[4]
outputFile2 = sys.argv[5]
if compileFlag1 == '-f':
agmData = AGMFileDataParsing.fromFile(inputFile, verbose=False)
agmData.generatePDDL(outputFile1, False)
if compileFlag2 == '-p':
agmData = AGMFileDataParsing.fromFile(inputFile, verbose=False)
agmData.generatePDDL(outputFile2, True)
else:
clase = AGMEditor(sys.argv[1])
clase.show()
app.exec_()
else:
clase = AGMEditor()
clase.show()
app.exec_()
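# --- Editor's note: hypothetical sketch, not part of the original editor ---
# Plotter.plot above renders the type hierarchy by turning the {type: parents}
# mapping into a networkx DiGraph and laying it out with graphviz_layout(prog='dot').
# The invented, GUI-free helper below restates just that drawing step; it assumes
# networkx, matplotlib and a graphviz backend are available.
def _demo_plot_type_hierarchy(types_direct, axes):
    import networkx as nx
    from networkx.drawing.nx_agraph import graphviz_layout
    g = nx.DiGraph()
    for symbol_type in types_direct:
        g.add_node(symbol_type)
        for parent in types_direct[symbol_type]:
            g.add_edge(symbol_type, parent, weight=1.0)
    labels = dict((n, n) for n in g.nodes())
    nx.draw(g, pos=graphviz_layout(g, prog='dot'), ax=axes, node_size=311,
            node_color='w', font_weight='bold', linewidths=0, font_size=18,
            labels=labels)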
|
gpl-3.0
|
lcdb/lcdblib
|
lcdblib/plotting/compare_rnaseq_and_chipseq.py
|
1
|
10044
|
import os
import sys
import matplotlib
from matplotlib import pyplot as plt
import numpy as np
import seaborn as sns
from lcdblib.plotting import results_table
from lcdblib.stats import fisher
import pybedtools
import pandas
import argh
from argh import arg
def plot(de_results, regions=None, peaks=None, selected=None, x='baseMean',
y='log2FoldChange', disable_logx=False, logy=False, pval_col='padj',
alpha=0.1, lfc_cutoff=0, plot_filename=None,
disable_raster_points=False, genes_to_label=None, label_column=None,
report=None, gene_lists=None
):
"""
M-A plot showing up- and downregulated genes with optional labeling and
    Fisher's exact tests.
If --plot-filename is not specified, then the plot will be displayed and
points can be clicked for interactive exploration.
    If --peaks and --regions are specified, then results from Fisher's exact
tests will be printed to stdout, or to --report if specified.
Parameters
----------
de_results : str or pandas.DataFrame
If str, it's the filename of a TSV of differential expression results,
with first column as gene ID. It will be parsed into a dataframe where
the index is gene ID. When called as a library, an already-created
pandas.DataFrame can optionally be provided instead.
regions : str or pybedtools.BedTool
Gene regions in which to look for intersections with peaks. BED file
where the 4th column contains gene IDs that are also present in first
column of `de_results`. Typically this would be a BED file of promoters
or gene bodies. When called as a library, a pybedtools.BedTool object
can optionally be provided instead.
peaks : str or pybedtools.BedTool
BED file to be intersected with `regions`. When called as a library,
a pybedtools.BedTool object can optionally be provided instead.
selected : str or list-like
Replaces the `regions` and `peaks` arguments; useful when you already know
which genes you want to select (e.g., upregulated from a different
experiment). If a string, it is assumed to be a filename whose first
column will be used as an index into the `de_results` dataframe.
When called as a library, if `selected` is not a string it will be used
as an index into the dataframe.
x : str
Column to use for x-axis. Default of "baseMean" expects DESeq2
results
y : str
Column to use for y-axis. Default of "log2FoldChange" expects DESeq2
results
disable_logx : bool
Disable default behavior of transforming x values using log10
logy : bool
Transform y values using log2
pval_col : str
Column to use for statistical significance. Default "padj" expects
DESeq2 results.
alpha : float
Threshold for calling significance. Applied to `pval_col`
lfc_cutoff : float
Log2fold change cutoff to be applied to y values. Threshold is applied
post-transformation, if any specified (e.g., `logy` argument).
plot_filename : str
File to save plot. Format auto-detected by extension. Output directory
will be created if needed.
disable_raster_points : bool
Disable the default behavior of rasterizing points in a PDF. Use
sparingly, since drawing 30k+ individual points in a PDF may slow down
your machine.
genes_to_label : str or list-like
Optional file containing genes to label with text. First column must be
a subset of the first column of `de_results`. Lines starting with '#'
and subsequent tab-separated columns will be ignored. When called as
a library, a list-like object of gene IDs can be provided.
label_column : str
Optional column from which to take gene labels found in
`genes_to_label` (e.g., "symbol"). If the value in this column is
missing, fall back to the index. Use this if your gene IDs are long
Ensembl IDs but you want the gene symbols to show up on the plot.
report : str
Where to write out Fisher's exact test results. Default is stdout
gene_lists : str
Prefix to gene lists. If specified, gene lists corresponding to the
cells of the 2x2 Fisher's exact test will be written to
{prefix}.up.tsv and {prefix}.dn.tsv. These are subsets of `de_results`
where genes are upregulated and have a peak in the region (or are
selected), or downregulated and have a peak in the region (or are
selected), respectively.
"""
rasterized = not disable_raster_points
rt = results_table.DESeq2Results(de_results, import_kwargs=dict(index_col=0))
up = rt.upregulated(alpha=alpha, lfc=lfc_cutoff)
dn = rt.downregulated(alpha=alpha, lfc=-lfc_cutoff)
ch = (up | dn)
un = ~ch
sns.set_context('talk')
sns.set_style('white')
general_kwargs=dict(marker='.', alpha=0.4, color='0.5', picker=5,
label="_", linewidth=0, rasterized=rasterized)
genes_to_highlight = [
(
up,
dict(color='#990000', marker='o', s=40, alpha=0.5, label='up (%s)' % sum(up))
),
(
dn,
dict(color='#005c99', marker='o', s=40, alpha=0.5, label='down (%s)' % sum(dn))
),
]
if genes_to_label:
_genes_to_label = []
if isinstance(genes_to_label, str):
for i in open(genes_to_label):
if i.startswith('#'):
continue
_genes_to_label.append(i.split('\t')[0].strip())
else:
_genes_to_label = genes_to_label
ind = rt.data.index.isin(_genes_to_label)
# Don't add labels if a coordinate is null
ind = ind & ~(rt.data[y].isnull() | rt.data[x].isnull())
if label_column:
names = rt.data.loc[ind, label_column]
# Fill in nulls with index to avoid labeling all genes with no
# symbol as "nan"
n = names.isnull()
names[n] = rt.index[n]
else:
names = rt.index[ind]
names = list(names)
genes_to_highlight.append(
(
ind,
dict(rasterized=rasterized, names=names, facecolor='None',
alpha=1.0, s=160, linewidth=1, zorder=100, label='_')
)
)
if not disable_logx:
xfunc=np.log10
else:
xfunc=None
if report is None:
output = sys.stdout
else:
output = open(report, 'w')
if selected and (peaks or regions):
raise ValueError(
"`selected` is mutually exclusive with `peaks` and `regions`")
do_fisher = False
if selected:
do_fisher = True
if isinstance(selected, str):
selected = list(pandas.read_table(selected, index_col=0).index)
selected_genes = rt.index.isin(selected)
row_names = ['selected', 'not selected']
elif peaks is not None and regions is not None:
do_fisher = True
row_names = ['has peak', 'no peak']
regions = pybedtools.BedTool(regions)
peaks = pybedtools.BedTool(peaks)
with_peak = list(set([i.name for i in regions.intersect(peaks, u=True)]))
in_region = peaks.intersect(regions, u=True)
selected_genes = rt.index.isin(with_peak)
npeaks = len(peaks)
nregions = len(regions)
npeaks_in_region = len(in_region)
output.write('Total peaks: {}\n'.format(npeaks))
output.write('Peaks in regions: {0} ({1:.2f}%)\n\n'.format(npeaks_in_region, npeaks_in_region / npeaks * 100))
if do_fisher:
genes_to_highlight.append(
(
selected_genes,
dict(color='#ff9900', alpha=0.8, s=30,
rasterized=rasterized, label='{0} ({1})'.format(row_names[0], sum(selected_genes)))
)
)
output.write(
fisher.fisher_tables(
table=fisher.table_from_bool(selected_genes, up),
row_names=row_names,
col_names=['upregulated', 'not'],
title='Upregulated (lfc>{0}; padj<{1})'.format(lfc_cutoff, alpha)))
output.write('\n\n')
output.write(
fisher.fisher_tables(
table=fisher.table_from_bool(selected_genes, dn),
row_names=row_names,
col_names=['downregulated', 'not'],
title='Downregulated (lfc<-{0}; padj<{1})'.format(lfc_cutoff, alpha)))
output.write('\n\n')
output.write(
fisher.fisher_tables(
table=fisher.table_from_bool(selected_genes, ch),
row_names=row_names,
col_names=['changed', 'not'],
title='Changed (lfc<-{0}; padj<{1})'.format(lfc_cutoff, alpha)))
if gene_lists is not None:
rt.data[selected_genes & up].to_csv(gene_lists + '.up.tsv', sep='\t')
rt.data[selected_genes & dn].to_csv(gene_lists + '.dn.tsv', sep='\t')
if report is not None:
output.close()
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(111)
ax = rt.scatter(
ax=ax,
x=x,
y=y,
xfunc=xfunc,
genes_to_highlight=genes_to_highlight,
general_kwargs=general_kwargs,
offset_kwargs=dict(x=-0.1),
label_kwargs=dict(
horizontalalignment='right',
verticalalignment='center',
style='italic',
size=10,
zorder=500,
bbox=dict(facecolor='w', alpha=0.5, edgecolor='none'))
)
ax.legend(loc='best', prop=dict(size=10))
if plot_filename:
dirname = os.path.dirname(plot_filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
fig.savefig(plot_filename)
return ax
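# Usage sketch (illustrative, not part of the original module): when imported as
# a library rather than run through argh, plot() can be called directly, e.g.
#   ax = plot('deseq_results.tsv', regions='promoters.bed', peaks='peaks.bed',
#             alpha=0.05, report='fisher.txt')
# The file names above are hypothetical; pandas.DataFrame / pybedtools.BedTool
# objects may be passed instead, as described in the docstring.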
if __name__ == "__main__":
argh.dispatch_command(plot)
plt.show()
|
mit
|
andreabrambilla/libres
|
python/res/enkf/export/summary_observation_collector.py
|
2
|
2313
|
from pandas import DataFrame, MultiIndex
import numpy
from res.enkf import ErtImplType, EnKFMain, EnkfFs, RealizationStateEnum, EnkfObservationImplementationType
from res.enkf.key_manager import KeyManager
from res.enkf.plot_data import EnsemblePlotData
from ecl.util.util import BoolVector
class SummaryObservationCollector(object):
@staticmethod
def getAllObservationKeys(ert):
"""
@type ert: EnKFMain
@rtype: list of str
"""
key_manager = KeyManager(ert)
return key_manager.summaryKeysWithObservations()
@staticmethod
def loadObservationData(ert, case_name, keys=None):
"""
@type ert: EnKFMain
@type case_name: str
@type keys: list of str
@rtype: DataFrame
"""
fs = ert.getEnkfFsManager().getFileSystem(case_name)
time_map = fs.getTimeMap()
dates = [time_map[index].datetime() for index in range(1, len(time_map))]
summary_keys = SummaryObservationCollector.getAllObservationKeys(ert)
if keys is not None:
summary_keys = [key for key in keys if key in summary_keys] # ignore keys that don't exist
columns = summary_keys
std_columns = ["STD_%s" % key for key in summary_keys]
df = DataFrame(index=dates, columns=columns + std_columns)
for key in summary_keys:
observation_keys = ert.ensembleConfig().getNode(key).getObservationKeys()
for obs_key in observation_keys:
observations = ert.getObservations()
observation_data = observations[obs_key]
history_length = ert.getHistoryLength()
for index in range(0, history_length):
if observation_data.isActive(index):
obs_time = observations.getObservationTime(index).datetime()
node = observation_data.getNode(index)
value = node.getValue()
std = node.getStandardDeviation()
df[key][obs_time] = value
df["STD_%s" % key][obs_time] = std
return df
@classmethod
def summaryKeyHasObservations(cls, ert, key):
return len(ert.ensembleConfig().getNode(key).getObservationKeys()) > 0
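# Usage sketch (illustrative; `ert` is an initialized EnKFMain instance and
# "default" a case name that exists in its storage; "FOPR" is a hypothetical key):
#   df = SummaryObservationCollector.loadObservationData(ert, "default")
#   has_obs = SummaryObservationCollector.summaryKeyHasObservations(ert, "FOPR")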
|
gpl-3.0
|
scienceopen/pyimagevideo
|
pyimagevideo/__init__.py
|
1
|
4777
|
from contextlib import contextmanager
import numpy as np
from pathlib import Path
import imageio
from typing import List, Tuple, Union
try:
from matplotlib.pyplot import figure, draw, close
except (ImportError, RuntimeError):
figure = draw = close = None
try:
import cv2
except ImportError:
cv2 = None
try:
from skimage.transform import resize
except ImportError:
resize = None
def wavelength2rgb(wavelength: float, gamma: float = 0.8) -> Tuple[float, float, float]:
'''
http://www.noah.org/wiki/Wavelength_to_RGB_in_Python
This converts a given wavelength into an approximate RGB value.
The given wavelength is in nanometers.
The range of wavelength is 380 nm through 750 nm.
Based on code by Dan Bruton
http://www.physics.sfasu.edu/astro/color/spectra.html
'''
wavelength = float(wavelength)
if 440. >= wavelength >= 380.:
attenuation = 0.3 + 0.7 * (wavelength - 380) / (440 - 380)
R = ((-(wavelength - 440) / (440 - 380)) * attenuation) ** gamma
G = 0.
B = (1. * attenuation) ** gamma
elif wavelength >= 440 and wavelength <= 490:
R = 0.0
G = ((wavelength - 440) / (490 - 440)) ** gamma
B = 1.
elif wavelength >= 490 and wavelength <= 510:
R = 0.
G = 1.
B = (-(wavelength - 510) / (510 - 490)) ** gamma
elif wavelength >= 510 and wavelength <= 580:
R = ((wavelength - 510) / (580 - 510)) ** gamma
G = 1.
B = 0.
elif wavelength >= 580 and wavelength <= 645:
R = 1.0
G = (-(wavelength - 645) / (645 - 580)) ** gamma
B = 0.
elif wavelength >= 645 and wavelength <= 750:
attenuation = 0.3 + 0.7 * (750 - wavelength) / (750 - 645)
R = (1.0 * attenuation) ** gamma
G = 0.
B = 0.
else:
R = 0.
G = 0.
B = 0.
R = int(R * 255)
G = int(G * 255)
B = int(B * 255)
assert 255 >= R >= 0 and 255 >= G >= 0 and 255 >= B >= 0
return R, G, B
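# For instance (illustrative), wavelength2rgb(550) falls in the 510-580 nm branch
# and returns a green-dominant (R, G, B) tuple with G == 255; wavelengths outside
# the 380-750 nm range return (0, 0, 0).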
def tone(fs: int = 8000, T: float = 1, f0: float = 1000) -> np.ndarray:
"""
generate f0 Hz sinusoid for T seconds at fs S/s.
"""
return np.sin(2 * np.pi * f0 * np.arange(0, T, 1 / fs))
def dialtone(fs: int = 8000, T: float = 1):
"""
generate North American dial tone
https://en.wikipedia.org/wiki/Precise_Tone_Plan
"""
return 0.5 * (tone(fs, T, 440) + tone(fs, T, 350))
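# Example (sketch): two seconds of dial tone at the default 8 kHz sample rate
#   samples = dialtone(fs=8000, T=2)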
def genimgseries(odir: Path) -> List[Path]:
if figure is None:
raise ImportError('pip install matplotlib')
odir = Path(odir).expanduser()
fg = figure(1, figsize=(0.5, 0.5))
# fg.set_visible(False)
ax = fg.gca()
flist = []
for i in range(10):
ax.clear()
ax.axis('off')
ax.text(0, 0, i, fontsize=36)
draw()
fn = odir / f'{i}.png'
flist.append(fn)
fg.savefig(fn, bbox_inches='tight', pad_inches=0)
close(fg)
return flist
def png2tiff(ofn: Path, pat: str, indir: Path = None):
"""
convert a series of PNGs, which may not all be exactly the same shape,
to a multipage TIFF (in the same directory)
alternatives: use ImageMagick from the command line, or Wand.
However, since the files are grouped in a specific way for the histfeas
program, it worked best to duplicate this (perhaps ImageMagick-duplicative)
functionality here in Python/imageio/skimage.
"""
if resize is None:
raise ImportError('pip install scikit-image')
ofn = Path(ofn).expanduser()
indir = ofn.parent if indir is None else Path(indir).expanduser()
# %% convert these sets of images to multipage image
flist = sorted(indir.glob(pat)) # yes, sorted()
if not flist:
raise FileNotFoundError(f'found no files with {pat} in {indir}')
im0 = imageio.imread(flist[0]) # priming read
images = np.empty((len(flist), *im0.shape), dtype=im0.dtype)
for i, f in enumerate(flist):
im = imageio.imread(f)
images[i, ...] = resize(im, im0.shape, mode='edge') # they are all of slightly different shape
imageio.mimwrite(ofn, images)
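# Usage sketch (paths are hypothetical):
#   png2tiff('stack.tif', '*.png', indir='frames')
# collects every PNG under frames/ (resized to the shape of the first one)
# into a single multipage stack.tif.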
@contextmanager
def VideoWriter(ofn: Union[str, Path], cc4: str, xypix: tuple, fps: float, usecolor: bool):
"""
inputs
ofn: string/Path output filename to write
cc4: string with a four-character fourcc code, e.g. 'FFV1'
xypix: two-element tuple with x, y pixel count
fps: frames per second of the output video
usecolor: bool, color or grayscale
"""
if cv2 is None:
raise ImportError('OpenCV was not installed or loaded')
ncc4 = cv2.VideoWriter_fourcc(*cc4)
hv = cv2.VideoWriter(str(ofn), ncc4, fps=fps, frameSize=xypix, isColor=usecolor)
if not hv or not hv.isOpened():
raise RuntimeError(f'trouble starting video {ofn}')
yield hv
hv.release()
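# Usage sketch (assumes OpenCV is installed; the frame source is hypothetical):
#   with VideoWriter('out.avi', 'FFV1', (640, 480), fps=30, usecolor=False) as vid:
#       for frame in frames:          # e.g. 480x640 uint8 grayscale arrays
#           vid.write(frame)
# The writer is released via hv.release() after the block completes.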
|
gpl-3.0
|
Obus/scikit-learn
|
sklearn/neighbors/base.py
|
115
|
29783
|
"""Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
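# Illustrative example (not part of the scikit-learn source): with
# dist = np.array([[1., 2.]]) and weights='distance', _get_weights returns
# array([[1. , 0.5]]), i.e. the inverse of each distance.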
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if (self.n_neighbors is None
or self.n_neighbors < self._fit_X.shape[0] // 2):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
return self
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns distance
Parameters
----------
X : array-like, last dimension same as that of fit data, optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = self._tree.query(X, n_neighbors,
return_distance=return_distance)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates are more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, last dimension same as that of fit data, optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([1., 1., 1.])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted to points at a distance lower than the
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
"""
return self._fit(X)
|
bsd-3-clause
|
JohanComparat/pySU
|
spm/bin_deep_surveys/combine_model_spectra_deep2.py
|
1
|
8884
|
import time
t0t=time.time()
from os.path import join
import os
import numpy as n
import glob
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
import astropy.io.fits as fits
from scipy.interpolate import interp1d
from scipy.stats import norm as gaussD
import GalaxySpectrumFIREFLY as gs
import StellarPopulationModel as spm
def create_tbhdu(sp_cha, imf, lib):
c1 = fits.Column(name='wavelength', format='D', unit='Angstrom', array=sp_cha[1].data['wavelength'])
c2 = fits.Column(name='model_flux', format='D', unit='1e-17 erg/cm2/s', array=sp_cha[1].data['firefly_model'])
coldefs = fits.ColDefs([c1, c2])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.header['HIERARCH library'] = lib
tbhdu.header['HIERARCH IMF'] = imf
tbhdu.header['HIERARCH age_lightW'] = sp_cha[1].header['age_lightW_mean']
tbhdu.header['HIERARCH age_lightW_up'] = sp_cha[1].header['age_lightW_mean_up']
tbhdu.header['HIERARCH age_lightW_low'] = sp_cha[1].header['age_lightW_mean_low']
tbhdu.header['HIERARCH metallicity_lightW'] = sp_cha[1].header['metallicity_lightW_mean']
tbhdu.header['HIERARCH metallicity_lightW_up'] = sp_cha[1].header['metallicity_lightW_mean_up']
tbhdu.header['HIERARCH metallicity_lightW_low'] = sp_cha[1].header['metallicity_lightW_mean_low']
tbhdu.header['HIERARCH age_massW'] = sp_cha[1].header['age_massW_mean']
tbhdu.header['HIERARCH age_massW_up'] = sp_cha[1].header['age_massW_mean_up']
tbhdu.header['HIERARCH age_massW_low'] = sp_cha[1].header['age_massW_mean_low']
tbhdu.header['HIERARCH metallicity_massW'] = sp_cha[1].header['metallicity_massW_mean']
tbhdu.header['HIERARCH metallicity_massW_up'] = sp_cha[1].header['metallicity_massW_mean_up']
tbhdu.header['HIERARCH metallicity_massW_low'] = sp_cha[1].header['metallicity_massW_mean_low']
tbhdu.header['HIERARCH EBV'] = sp_cha[1].header['EBV']
tbhdu.header['HIERARCH stellar_mass'] = sp_cha[1].header['stellar_mass_mean']
tbhdu.header['HIERARCH stellar_mass_up'] = sp_cha[1].header['stellar_mass_mean_up']
tbhdu.header['HIERARCH stellar_mass_low'] = sp_cha[1].header['stellar_mass_mean_low']
tbhdu.header['HIERARCH ssp_number'] = sp_cha[1].header['ssp_number']
for el in sp_cha[1].header[33:]:
tbhdu.header['HIERARCH '+el] = sp_cha[1].header[el]
return tbhdu
env = 'DEEP2_DIR'
out_dir = os.path.join(os.environ[env], 'stellarpop')
im_dir = os.path.join(os.environ[env], 'stellarpop', 'images')
init_cat=join(os.environ['DEEP2_DIR'], "catalogs", "zcat.deep2.dr4.v4.LFcatalogTC.Planck15.fits")
summary_catalog = join(os.environ['DEEP2_DIR'], "catalogs", "zcat.deep2.dr4.v4.LFcatalogTC.Planck15.spm.fits")
hdu_orig_table = fits.open(init_cat)
orig_table = hdu_orig_table[1].data
orig_cols = orig_table.columns
catalog_entry = orig_table[0]
topdirs = join( os.environ['DEEP2_DIR'], 'stellarpop-*', 'stellarpop')
def make_summary(catalog_entry):
tbhdus = []
#table_all = []
#for catalog_entry in orig_table:
mask=str(catalog_entry['MASK'])
objno=str(catalog_entry['OBJNO'])
# defines output files
out_file = 'spFly-deep2-'+mask+'-'+objno+'.fits'
path_2_out_file = os.path.join(out_dir, out_file)
wwwwwqq
im_file = 'spFly-deep2-'+mask+'-'+objno+'.png'
path_2_im_file = os.path.join(im_dir, im_file)
# now gets the models
models = n.array(glob.glob(join(topdirs, 'spFly-deep2-'+mask+'-'+objno+"*.fits")))
models.sort()
print mask, objno
path_to_spectrum = glob.glob(join(os.environ['DEEP2_DIR'], 'spectra', mask, '*', '*' + objno + '*_fc_tc.dat'))
if len(path_to_spectrum)>=1 and len(models)>=2 and os.path.isfile(path_2_out_file)==False:
# open the observation file
spe=gs.GalaxySpectrumFIREFLY("-", milky_way_reddening=True)
spe.openObservedDEEP2pectrum(catalog_entry)
spec = interp1d(spe.wavelength, spe.flux)
err = interp1d(spe.wavelength, spe.error)
wl_data_max = n.max(spe.wavelength)
wl_data_min = n.min(spe.wavelength)
N_data_points = len(spe.wavelength)
for path_2_model in models:
imf = path_2_model.split('/')[-3].split('-')[2]
lib = path_2_model.split('/')[-3].split('-')[3]
tb = create_tbhdu(fits.open(path_2_model), imf, lib)
tbhdus.append(tb)
prihdr = fits.Header()
prihdr['file'] = out_file
prihdr['mask'] = mask
prihdr['objno'] = objno
prihdr['models'] = 'Maraston_2011'
prihdr['fitter'] = 'FIREFLY'
#prihdr['ageMin'] = tbhdus[0].header['ageMin']
#prihdr['ageMax'] = tbhdus[0].header['ageMax']
#prihdr['Zmin'] = tbhdus[0].header['Zmin']
#prihdr['Zmax'] = tbhdus[0].header['Zmax']
#
#prihdr['HIERARCH age_universe'] = tbhdus[1].header['age_universe']
prihdr['HIERARCH redshift'] = spe.redshift
# now creates the figure per model
fig = p.figure(0, figsize = (7, 10), frameon=False)#, tight_layout=True)
rect = 0.2, 0.15, 0.85, 0.95
#ax = fig.add_axes(rect, frameon=False)
# panel with the spectrum
fig.add_subplot(3,1,1)
p.plot(spe.wavelength[::2], spe.flux[::2], 'k', rasterized =True, alpha=0.5)
p.yscale('log')
mean_data = n.median(spe.flux)
p.ylim((mean_data/8., mean_data*8.))
p.xlabel('Wavelength [Angstrom]')
p.ylabel(r'Flux [$f_\lambda$ $10^{-17}$ erg/cm2/s/A]')
p.title("mask=" + mask + ", objno=" + objno + ", z=" + str(n.round(spe.redshift,3)))
# second panel distribution of residuals
fig.add_subplot(3,1,2)
for hdu in tbhdus:
ok_model = (hdu.data['wavelength']>wl_data_min)&(hdu.data['wavelength']<wl_data_max)
wl_model = hdu.data['wavelength'][ok_model]
#p.plot(wl_model, (spec(wl_model)-hdu.data['model_flux'][ok_model])/err(wl_model), 'k', rasterized =True, alpha=0.5)
chi2s=(spec(wl_model)-hdu.data['model_flux'][ok_model])/err(wl_model)
p.hist(chi2s, bins = n.arange(-2,2,0.1), normed = True, histtype='step', label=hdu.header['IMF']+hdu.header['library']+", EBV="+str(n.round(hdu.header['EBV'],3))+r", $\chi^2=$"+str(n.round(n.sum(chi2s**2.)/(len(chi2s)-2.),4)))
p.ylim((-0.02,1.02))
#p.yscale('log')
p.xlabel('(data-model)/error')
p.ylabel('Normed distribution')
hdu.header['chi2'] = n.sum(chi2s**2.)
hdu.header['ndof'] = len(chi2s)-2.
p.plot(n.arange(-2,2,0.005), gaussD.pdf(n.arange(-2,2,0.005)), 'k--', label=r'N(0,1)', lw=0.5)
p.grid()
p.legend(frameon=False, loc=0, fontsize=8)
fig.add_subplot(3,1,3)
tpl = n.transpose(n.array([ [
hdu.header['age_lightW'],
hdu.header['stellar_mass'],
hdu.header['age_lightW_up']-hdu.header['age_lightW'],
hdu.header['age_lightW']-hdu.header['age_lightW_low'],
hdu.header['stellar_mass_up']-hdu.header['stellar_mass'],
hdu.header['stellar_mass']-hdu.header['stellar_mass_low']]
for hdu in tbhdus ]))
p.errorbar(tpl[0], tpl[1], xerr=[tpl[2], tpl[3]], yerr=[tpl[4], tpl[5]], barsabove=True, fmt='o')
#p.axvline(prihdr['age_universe'], color='r', ls='dashed')
idsUP = n.argsort(tpl[1])
iterList = n.array(tbhdus)[idsUP]
for jj, hdu in enumerate(iterList):
p.annotate(hdu.header['IMF']+hdu.header['library']+r", $\log(Z/Z_\odot)=$"+str(n.round(hdu.header['metallicity_lightW'],4)),
xy = (hdu.header['age_lightW'], hdu.header['stellar_mass']), xycoords='data',
xytext=(0.85, (jj+0.5)/len(iterList)), textcoords='axes fraction',
arrowprops=dict(facecolor='black', shrink=0.05, width=0.2, headwidth=3),
horizontalalignment='right', verticalalignment='top', fontsize=9)
p.ylabel(r'$\log_{10}(M/[M_\odot])$')
p.xlabel(r'$\log_{10}(age/[yr])$')
#p.ylim((9,12.5))
p.grid()
p.savefig(path_2_im_file)
p.clf()
prihdu = fits.PrimaryHDU(header=prihdr)
newlist = [prihdu]
for el in tbhdus:
newlist.append(el)
thdulist = fits.HDUList(newlist) # , tbhdu_cha_nd, tbhdu_kr_nd, tbhdu_sa_nd, tbhdu_cha_el, tbhdu_kr_el, tbhdu_sa_el ])
if os.path.isfile(path_2_out_file ):
os.remove(path_2_out_file )
thdulist.writeto( path_2_out_file )
print time.time()-t0t
return 1.
else:
return 0.
for catalog_entry in orig_table:
print make_summary(catalog_entry)
|
cc0-1.0
|
rs2/pandas
|
pandas/tests/series/indexing/test_mask.py
|
5
|
1602
|
import numpy as np
import pytest
from pandas import Series
import pandas._testing as tm
def test_mask():
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
tm.assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
tm.assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
tm.assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
tm.assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
tm.assert_series_equal(rs, rs2)
msg = "Array conditional must be same shape as self"
with pytest.raises(ValueError, match=msg):
s.mask(1)
with pytest.raises(ValueError, match=msg):
s.mask(cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
tm.assert_series_equal(result, expected)
# see gh-21891
s = Series([1, 2])
res = s.mask([True, False])
exp = Series([np.nan, 2])
tm.assert_series_equal(res, exp)
def test_mask_inplace():
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
tm.assert_series_equal(rs.dropna(), s[~cond])
tm.assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
tm.assert_series_equal(rs, s.mask(cond, -s))
|
bsd-3-clause
|
XiaoxiaoLiu/morphology_analysis
|
bigneuron/run_taiwan_analysis.py
|
1
|
1087
|
__author__ = 'xiaoxiaol'
import sys
import os
import platform
import pandas as pd
import numpy as np
if (platform.system() == "Linux"):
WORK_PATH = "/local1/xiaoxiaol/work"
else:
WORK_PATH = "/Users/xiaoxiaoliu/work"
p = WORK_PATH + '/src/morphology_analysis'
sys.path.append(p)
import bigneuron.median_compare_to_consensus as mc2c
#DATA='/home/xiaoxiaol/work/data'
#DATA='/data/mat/xiaoxiaol/data/big_neuron'
DATA='/mnt/BigNeuron/data'
test = 0
smooth = 1
data_DIR= DATA+"/taiwan16k"
fn_list = '~/work/data/taiwan_image_file_name_list.csv'
df_i = pd.read_csv(fn_list)
imageIDs = df_i['image_file_name']
if test:
ids = np.random.randint(1,15921,100)
ids = range(1,100,1)
imageIDs = imageIDs[ids]
# imageIDs= imageIDs.astype('str')
# for im in images[random_ids]:
subfolder = "consensus_0330"
if smooth>0:
subfolder=subfolder+"_anisosmooth"
output_dir = data_DIR+'/'+subfolder+"/analysis_results"
mc2c.pipe(data_DIR+'/'+subfolder, output_dir, imageIDs,'median_distances.csv',COLLECT_FROM_DISTANCE_MATRIX=1,EXTRACT_MEDIAN_CONSENSUS=1)
|
gpl-3.0
|
horance-liu/tensorflow
|
tensorflow/python/estimator/inputs/pandas_io.py
|
86
|
4503
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions
try:
# pylint: disable=g-import-not-at-top
# pylint: disable=unused-import
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=None,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""Returns input function that would feed Pandas DataFrame into the model.
Note: `y`'s index must match `x`'s index.
Args:
x: pandas `DataFrame` object.
y: pandas `Series` object. `None` if absent.
batch_size: int, size of batches to return.
num_epochs: int, number of epochs to iterate over data. If not `None`,
read attempts that would exceed this value will raise `OutOfRangeError`.
shuffle: bool, whether to read the records in random order.
queue_capacity: int, size of the read queue. If `None`, it will be set
roughly to the size of `x`.
num_threads: Integer, number of threads used for reading and enqueueing. In
order to have a predictable and repeatable order of reading and enqueueing,
such as in prediction and evaluation mode, `num_threads` should be 1.
target_column: str, name to give the target column `y`.
Returns:
Function, that has signature of ()->(dict of `features`, `target`)
Raises:
ValueError: if `x` already contains a column with the same name as `y`, or
if the indexes of `x` and `y` don't match.
TypeError: `shuffle` is not bool.
"""
if not HAS_PANDAS:
raise TypeError(
'pandas_input_fn should not be called without pandas installed')
if not isinstance(shuffle, bool):
raise TypeError('shuffle must be explicitly set as boolean; '
'got {}'.format(shuffle))
x = x.copy()
if y is not None:
if target_column in x:
raise ValueError(
'Cannot use name %s for target column: DataFrame already has a '
'column with that name: %s' % (target_column, x.columns))
if not np.array_equal(x.index, y.index):
raise ValueError('Index for x and y are mismatched.\nIndex for x: %s\n'
'Index for y: %s\n' % (x.index, y.index))
x[target_column] = y
# TODO(mdan): These are memory copies. We probably don't need 4x slack space.
# The sizes below are consistent with what I've seen elsewhere.
if queue_capacity is None:
if shuffle:
queue_capacity = 4 * len(x)
else:
queue_capacity = len(x)
min_after_dequeue = max(queue_capacity / 4, 1)
def input_fn():
"""Pandas input function."""
queue = feeding_functions._enqueue_data( # pylint: disable=protected-access
x,
queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
enqueue_size=batch_size,
num_epochs=num_epochs)
if num_epochs is None:
features = queue.dequeue_many(batch_size)
else:
features = queue.dequeue_up_to(batch_size)
assert len(features) == len(x.columns) + 1, ('Features should have one '
'extra element for the index.')
features = features[1:]
features = dict(zip(list(x.columns), features))
if y is not None:
target = features.pop(target_column)
return features, target
return features
return input_fn
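# Usage sketch (illustrative; `df`, `labels` and `estimator` are hypothetical):
#   train_input_fn = pandas_input_fn(x=df, y=labels, batch_size=32,
#                                    num_epochs=None, shuffle=True)
#   estimator.train(input_fn=train_input_fn, steps=1000)
# Note that `shuffle` must be passed explicitly as a bool, otherwise a TypeError
# is raised.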
|
apache-2.0
|
0asa/scikit-learn
|
examples/text/document_clustering.py
|
31
|
8036
|
"""
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
frequent words to feature indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: since they are based only
on cluster assignments rather than distances, they are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
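# A minimal sketch of the pipelining mentioned in the docstring above (this is
# what the --use-hashing path below builds when IDF weighting is enabled):
#   vectorizer = make_pipeline(HashingVectorizer(stop_words='english',
#                                                non_negative=True, norm=None),
#                              TfidfTransformer())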
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
lsa = make_pipeline(svd, Normalizer(copy=False))
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not (opts.n_components or opts.use_hashing):
print("Top terms per cluster:")
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
|
bsd-3-clause
|
GoogleCloudPlatform/mlops-on-gcp
|
on_demand/composer/labs/chicago_taxifare/trainer/model.py
|
4
|
5655
|
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow import feature_column as fc
from tensorflow.keras import layers
from tensorflow.keras import models
from google.cloud import bigquery
CSV_COLUMNS = [
'fare_amount',
'dayofweek',
'hourofday',
'pickuplon',
'pickuplat',
'dropofflon',
'dropofflat'
]
LABEL_COLUMN = 'fare_amount'
DEFAULTS = [[0.0], [0], [0], [0.0], [0.0], [0.0], [0.0]]
UNWANTED_COLS = []
def features_and_labels(row_data):
label = row_data.pop(LABEL_COLUMN)
features = row_data
for unwanted_col in UNWANTED_COLS:
features.pop(unwanted_col)
return features, label
def create_dataset(pattern, batch_size=1, mode=tf.estimator.ModeKeys.EVAL):
dataset = tf.data.experimental.make_csv_dataset(
pattern, batch_size, CSV_COLUMNS, DEFAULTS)
dataset = dataset.map(features_and_labels)
if mode == tf.estimator.ModeKeys.TRAIN:
dataset = dataset.shuffle(buffer_size=1000).repeat(None)
    # prefetch the next batch while the current one is consumed (tf.data.AUTOTUNE can tune this automatically)
dataset = dataset.prefetch(1)
return dataset
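# --- Editor's sketch (not part of the original module) ---------------------
# Minimal example of how create_dataset might be used for training; the GCS
# pattern below is a placeholder assumption, not a file from this project.
def _example_create_dataset():
    train_ds = create_dataset('gs://example-bucket/taxi-train*.csv',
                              batch_size=32,
                              mode=tf.estimator.ModeKeys.TRAIN)
    for features, label in train_ds.take(1):
        print(sorted(features.keys()), label.shape)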
def euclidean(params):
lon1, lat1, lon2, lat2 = params
londiff = lon2 - lon1
latdiff = lat2 - lat1
return tf.sqrt(londiff*londiff + latdiff*latdiff)
def transform(inputs, num_cols, cat_cols):
print("Inputs before features transformation: {}".format(inputs.keys()))
# Pass-through columns
transformed = inputs.copy()
feature_columns = {
colname: tf.feature_column.numeric_column(colname)
for colname in num_cols
}
# Add Euclidean distance
transformed['euclidean'] = layers.Lambda(
euclidean,
name='euclidean')([inputs['pickuplon'],
inputs['pickuplat'],
inputs['dropofflon'],
inputs['dropofflat']])
feature_columns['euclidean'] = fc.numeric_column('euclidean')
# Shift 'dayofweek' feature to a value range of 0-6
transformed['dayofweek'] = transformed['dayofweek'] - 1
# Create categorical columns (wrapped in indicator columns)
feature_columns['hourofday'] = fc.indicator_column(
fc.categorical_column_with_identity('hourofday', 24))
feature_columns['dayofweek'] = fc.indicator_column(
fc.categorical_column_with_identity('dayofweek', 7))
print("Transformed features: {}".format(transformed.keys()))
print("Feature columns: {}".format(feature_columns.keys()))
return transformed, feature_columns
def build_dnn_model():
NUM_COLS = [
'pickuplon',
'pickuplat',
'dropofflon',
'dropofflat',
]
CAT_COLS = [
'hourofday',
'dayofweek',
]
inputs = {
colname: layers.Input(name=colname, shape=(), dtype='float32')
for colname in NUM_COLS
}
inputs.update({
colname: layers.Input(name=colname, shape=(), dtype='int32')
for colname in CAT_COLS
})
# transforms
transformed, feature_columns = transform(inputs,
num_cols=NUM_COLS,
cat_cols=CAT_COLS)
dnn_inputs = layers.DenseFeatures(feature_columns.values())(transformed)
    # two hidden layers of [32, 8], just like the BQML DNN
h1 = layers.Dense(32, activation='relu', name='h1')(dnn_inputs)
h2 = layers.Dense(8, activation='relu', name='h2')(h1)
# final output is a linear activation because this is regression
output = layers.Dense(1, activation='linear', name='fare')(h2)
model = models.Model(inputs, output)
# Compile model
model.compile(optimizer='adam',
loss='mse',
metrics=['RootMeanSquaredError'])
return model
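# --- Editor's sketch (not part of the original module) ---------------------
# Quick sanity check of the Keras model assembled above; purely illustrative.
def _example_build_dnn_model():
    model = build_dnn_model()
    model.summary()  # shows the DenseFeatures input, the 'euclidean' Lambda and the two hidden layers
    return model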
def train_and_evaluate(hparams):
strategy = tf.distribute.MirroredStrategy()
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
BATCH_SIZE_PER_REPLICA = 256
TRAIN_BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync
NUM_TRAIN_EXAMPLES = 4 * 52000 * hparams['train_epochs']
NUM_EVALS = hparams['train_epochs']
NUM_EVAL_EXAMPLES = 52000
OUTDIR = hparams['output_dir']
LOGDIR = hparams['log_dir']
with strategy.scope():
model = build_dnn_model()
trainds = create_dataset(hparams['train_data_path'],
TRAIN_BATCH_SIZE,
tf.estimator.ModeKeys.TRAIN)
evalds = create_dataset(hparams['eval_data_path'],
1000,
tf.estimator.ModeKeys.EVAL)
steps_per_epoch = NUM_TRAIN_EXAMPLES // (TRAIN_BATCH_SIZE * NUM_EVALS)
validation_steps = NUM_EVAL_EXAMPLES // BATCH_SIZE_PER_REPLICA
callbacks = [tf.keras.callbacks.ModelCheckpoint(filepath=OUTDIR),
tf.keras.callbacks.TensorBoard(log_dir=LOGDIR)]
history = model.fit(trainds,
verbose=2,
validation_data=evalds,
validation_steps=validation_steps,
epochs=NUM_EVALS,
steps_per_epoch=steps_per_epoch,
callbacks=callbacks)
tf.saved_model.save(model, hparams['output_dir'])
val_metric = history.history['val_RootMeanSquaredError'][NUM_EVALS-1]
client = bigquery.Client()
sql = """ INSERT `{0}.model_metrics`
VALUES ('{1}',{2});
""".format(hparams['output_ds'], hparams['version_name'], val_metric)
query_job = client.query(sql)
print(query_job.done())
return history
|
apache-2.0
|
NunoEdgarGub1/scikit-learn
|
sklearn/linear_model/tests/test_randomized_l1.py
|
214
|
4690
|
# Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# univariate F-scores of the features; used below to check that the best features are recovered
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
|
bsd-3-clause
|
pxsdirac/tushare
|
tushare/stock/shibor.py
|
38
|
5010
|
# -*- coding:utf-8 -*-
"""
Shanghai Interbank Offered Rate (Shibor) data interface
Created on 2014/07/31
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
import pandas as pd
import numpy as np
from tushare.stock import cons as ct
from tushare.util import dateu as du
def shibor_data(year=None):
"""
    Get the Shanghai Interbank Offered Rate (Shibor)
    Parameters
    ------
    year: year (int)
    Return
    ------
    date: date
    ON: overnight rate
    1W: 1-week rate
    2W: 2-week rate
    1M: 1-month rate
    3M: 3-month rate
    6M: 6-month rate
    9M: 9-month rate
    1Y: 1-year rate
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['Shibor']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'Shibor',
year, lab,
year))
df.columns = ct.SHIBOR_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
    except Exception:
return None
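# --- Editor's sketch (not part of the original module) ---------------------
# Example call; the year is an arbitrary assumption, and the function returns
# None when the remote Excel file cannot be fetched.
def _example_shibor_data():
    df = shibor_data(2015)
    if df is not None:
        print(df.head())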
def shibor_quote_data(year=None):
"""
    Get Shibor bank quote data
    Parameters
    ------
    year: year (int)
    Return
    ------
    date: date
    bank: name of the quoting bank
    ON: overnight rate
    ON_B: overnight bid
    ON_A: overnight ask
    1W_B: 1-week bid
    1W_A: 1-week ask
    2W_B: 2-week bid
    2W_A: 2-week ask
    1M_B: 1-month bid
    1M_A: 1-month ask
    3M_B: 3-month bid
    3M_A: 3-month ask
    6M_B: 6-month bid
    6M_A: 6-month ask
    9M_B: 9-month bid
    9M_A: 9-month ask
    1Y_B: 1-year bid
    1Y_A: 1-year ask
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['Quote']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'Quote',
year, lab,
year), skiprows=[0])
df.columns = ct.QUOTE_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
    except Exception:
return None
def shibor_ma_data(year=None):
"""
    Get Shibor moving-average (tendency) data
    Parameters
    ------
    year: year (int)
    Return
    ------
    date: date
    remaining columns: 5-, 10- and 20-day moving averages for each tenor
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['Tendency']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'Shibor_Tendency',
year, lab,
year), skiprows=[0])
df.columns = ct.SHIBOR_MA_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
    except Exception:
return None
def lpr_data(year=None):
"""
    Get the Loan Prime Rate (LPR)
    Parameters
    ------
    year: year (int)
    Return
    ------
    date: date
    1Y: 1-year loan prime rate
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['LPR']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'LPR',
year, lab,
year))
df.columns = ct.LPR_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
    except Exception:
return None
def lpr_ma_data(year=None):
"""
    Get LPR moving-average data
    Parameters
    ------
    year: year (int)
    Return
    ------
    date: date
    1Y_5: 5-day average
    1Y_10: 10-day average
    1Y_20: 20-day average
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['LPR_Tendency']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'LPR_Tendency',
year, lab,
year), skiprows=[0])
df.columns = ct.LPR_MA_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
    except Exception:
return None
|
bsd-3-clause
|
thp44/delphin_6_automation
|
data_process/2d_1d/archieve/relative_humidity_comparison.py
|
1
|
18212
|
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import pandas as pd
import matplotlib.pyplot as plt
# RiBuild Modules
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
# Folders
out_folder = r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\2d_1d\processed_data'
graphic_folder = r'U:\RIBuild\2D_1D\Processed Results\4A'
hdf_file = out_folder + '/relative_humidity.h5'
# Open HDF
# Uninsulated
dresdenzp_highratio_uninsulated_4a = pd.read_hdf(hdf_file, 'dresden_zp_high_ratio_uninsulated_4a')
dresdenzd_highratio_uninsulated_4a = pd.read_hdf(hdf_file, 'dresden_zd_high_ratio_uninsulated_4a')
postdam_highratio_uninsulated_4a = pd.read_hdf(hdf_file, 'potsdam_high_ratio_uninsulated_4a')
dresdenzp_lowratio_uninsulated_4a = pd.read_hdf(hdf_file, 'dresden_zp_low_ratio_uninsulated_4a')
dresdenzd_lowratio_uninsulated_4a = pd.read_hdf(hdf_file, 'dresden_zd_low_ratio_uninsulated_4a')
postdam_lowratio_uninsulated_4a = pd.read_hdf(hdf_file, 'potsdam_low_ratio_uninsulated_4a')
total_uninsulated_4a = pd.concat([dresdenzp_highratio_uninsulated_4a, dresdenzd_highratio_uninsulated_4a,
postdam_highratio_uninsulated_4a, dresdenzp_lowratio_uninsulated_4a,
dresdenzd_lowratio_uninsulated_4a, postdam_lowratio_uninsulated_4a])
# Insulated
dresdenzp_highratio_insulated_4a = pd.read_hdf(hdf_file, 'dresden_zp_high_ratio_insulated_4a')
dresdenzd_highratio_insulated_4a = pd.read_hdf(hdf_file, 'dresden_zd_high_ratio_insulated_4a')
postdam_highratio_insulated_4a = pd.read_hdf(hdf_file, 'potsdam_high_ratio_insulated_4a')
dresdenzp_lowratio_insulated_4a = pd.read_hdf(hdf_file, 'dresden_zp_low_ratio_insulated_4a')
dresdenzd_lowratio_insulated_4a = pd.read_hdf(hdf_file, 'dresden_zd_low_ratio_insulated_4a')
postdam_lowratio_insulated_4a = pd.read_hdf(hdf_file, 'potsdam_low_ratio_insulated_4a')
total_insulated_4a = pd.concat([dresdenzp_highratio_insulated_4a, dresdenzd_highratio_insulated_4a,
postdam_highratio_insulated_4a, dresdenzp_lowratio_insulated_4a,
dresdenzd_lowratio_insulated_4a, postdam_lowratio_insulated_4a])
def plots(plot, save=False):
"""
Creates box plots from all the wall scenarios
"""
if plot == 'uninsulated' or plot == 'all':
plt.figure('dresdenzp_highratio_uninsulated_4a_relhum', figsize=(16, 8), tight_layout=True)
dresdenzp_highratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 60)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Relative Humidity\n'
'Brick: Dresden ZP - Mortar: High Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/dresdenzp_highratio_uninsulated_4a_relhum")
plt.figure('dresdenzd_highratio_uninsulated_4a_relhum', figsize=(16, 8), tight_layout=True)
dresdenzd_highratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 60)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Relative Humidity\n'
'Brick: Dresden ZD - Mortar: High Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/dresdenzd_highratio_uninsulated_4a_relhum")
plt.figure('postdam_highratio_uninsulated_4a_relhum', figsize=(16, 8), tight_layout=True)
postdam_highratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 60)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Relative Humidity\n'
'Brick: Potsdam - Mortar: High Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/postdam_highratio_uninsulated_4a_relhum")
plt.figure('dresdenzp_lowratio_uninsulated_4a_relhum', figsize=(16, 8), tight_layout=True)
dresdenzp_lowratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 60)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Relative Humidity\n'
'Brick: Dresden ZP - Mortar: Low Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/dresdenzp_lowratio_uninsulated_4a_relhum")
plt.figure('dresdenzd_lowratio_uninsulated_4a_relhum', figsize=(16, 8), tight_layout=True)
dresdenzd_lowratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 60)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Relative Humidity\n'
'Brick: Dresden ZD - Mortar: Low Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/dresdenzd_lowratio_uninsulated_4a_relhum")
plt.figure('postdam_lowratio_uninsulated_4a_relhum', figsize=(16, 8), tight_layout=True)
postdam_lowratio_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 60)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Relative Humidity\n'
'Brick: Potsdam - Mortar: Low Cement Ratio - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/postdam_lowratio_uninsulated_4a_relhum")
plt.figure('total_uninsulated_4a_relhum', figsize=(16, 8), tight_layout=True)
total_uninsulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 60)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Relative Humidity\n'
'Brick: All - Mortar: All - Insulation: None')
if save:
plt.savefig(f"{graphic_folder}/total_uninsulated_4a_relhum")
if plot == 'insulated' or plot == 'all':
plt.figure('dresdenzp_highratio_insulated_4a_relhum', figsize=(16, 8), tight_layout=True)
dresdenzp_highratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 65)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Relative Humidity\n'
'Brick: Dresden ZP - Mortar: High Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/dresdenzp_highratio_insulated_4a_relhum")
plt.figure('dresdenzd_highratio_insulated_4a_relhum', figsize=(16, 8), tight_layout=True)
dresdenzd_highratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 65)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Relative Humidity\n'
'Brick: Dresden ZD - Mortar: High Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/dresdenzd_highratio_insulated_4a_relhum")
plt.figure('postdam_highratio_insulated_4a_relhum', figsize=(16, 8), tight_layout=True)
postdam_highratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 65)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Relative Humidity\n'
'Brick: Potsdam - Mortar: High Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/postdam_highratio_insulated_4a_relhum")
plt.figure('dresdenzp_lowratio_insulated_4a_relhum', figsize=(16, 8), tight_layout=True)
dresdenzp_lowratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 65)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Relative Humidity\n'
'Brick: Dresden ZP - Mortar: Low Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/dresdenzp_lowratio_insulated_4a_relhum")
plt.figure('dresdenzd_lowratio_insulated_4a_relhum', figsize=(16, 8), tight_layout=True)
dresdenzd_lowratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 65)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Relative Humidity\n'
'Brick: Dresden ZD - Mortar: Low Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/dresdenzd_lowratio_insulated_4a_relhum")
plt.figure('postdam_lowratio_insulated_4a_relhum', figsize=(16, 8), tight_layout=True)
postdam_lowratio_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 65)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Relative Humidity\n'
'Brick: Potsdam - Mortar: Low Cement Ratio - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/postdam_lowratio_insulated_4a_relhum")
plt.figure('total_insulated_4a_relhum', figsize=(16, 8), tight_layout=True)
total_insulated_4a.boxplot(showfliers=False)
plt.ylim(-5, 65)
plt.ylabel('Relative Difference in %')
plt.title('Weighted Relative Difference between 1D and 2D\n'
'Relative Humidity\n'
'Brick: All - Mortar: All - Insulation: Calcium Silicate')
if save:
plt.savefig(f"{graphic_folder}/total_insulated_4a_relhum")
plt.show()
plots('uninsulated', False)
def std3_ratio(print_=False, excel=False):
"""Computes ratio of outliers in the data sets. Outliers is here defined as data points deviating with more
the 3 standard deviations from the mean."""
std3_uninsulated_ratio_ = uninsulated()
std3_insulated_ratio_ = insulated()
if print_:
print('Uninsulated')
print(std3_uninsulated_ratio_)
print('')
print('Insulated')
print(std3_insulated_ratio_)
if excel:
writer = pd.ExcelWriter(f'{out_folder}/relhum_std_ratios.xlsx')
std3_uninsulated_ratio_.to_excel(writer, 'Uninsulated')
std3_insulated_ratio_.to_excel(writer, 'Insulated')
writer.save()
def uninsulated():
"""Computes the outliers for the uninsulated cases"""
outliers_total_uninsulated = (total_uninsulated_4a.shape[0] -
total_uninsulated_4a.sub(total_uninsulated_4a.mean())
.div(total_uninsulated_4a.std()).abs().lt(3).sum()) / total_uninsulated_4a.shape[0]
outliers_zd_high_uninsulated = (dresdenzd_highratio_uninsulated_4a.shape[0] -
dresdenzd_highratio_uninsulated_4a.sub(dresdenzd_highratio_uninsulated_4a.mean())
.div(dresdenzd_highratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzd_highratio_uninsulated_4a.shape[0]
outliers_zp_high_uninsulated = (dresdenzp_highratio_uninsulated_4a.shape[0] -
dresdenzp_highratio_uninsulated_4a.sub(dresdenzp_highratio_uninsulated_4a.mean())
.div(dresdenzp_highratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzp_highratio_uninsulated_4a.shape[0]
outliers_pd_high_uninsulated = (postdam_highratio_uninsulated_4a.shape[0] -
postdam_highratio_uninsulated_4a.sub(postdam_highratio_uninsulated_4a.mean())
.div(postdam_highratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ postdam_highratio_uninsulated_4a.shape[0]
outliers_zd_low_uninsulated = (dresdenzd_lowratio_uninsulated_4a.shape[0] -
dresdenzd_lowratio_uninsulated_4a.sub(dresdenzd_lowratio_uninsulated_4a.mean())
.div(dresdenzd_lowratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzd_lowratio_uninsulated_4a.shape[0]
outliers_zp_low_uninsulated = (dresdenzp_lowratio_uninsulated_4a.shape[0] -
dresdenzp_lowratio_uninsulated_4a.sub(dresdenzp_lowratio_uninsulated_4a.mean())
.div(dresdenzp_lowratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzp_lowratio_uninsulated_4a.shape[0]
outliers_pd_low_uninsulated = (postdam_lowratio_uninsulated_4a.shape[0] -
postdam_lowratio_uninsulated_4a.sub(postdam_lowratio_uninsulated_4a.mean())
.div(postdam_lowratio_uninsulated_4a.std()).abs().lt(3).sum()) \
/ postdam_lowratio_uninsulated_4a.shape[0]
outliers_uninsulated_ratio_ = pd.concat([outliers_total_uninsulated, outliers_zd_high_uninsulated,
outliers_zp_high_uninsulated, outliers_pd_high_uninsulated,
outliers_zd_low_uninsulated, outliers_zp_low_uninsulated,
outliers_pd_low_uninsulated], axis=1)
outliers_uninsulated_ratio_.columns = ["Brick: All - Mortar: All - Insulation: None",
"Brick: Dresden ZD - Mortar: High Cement Ratio - Insulation: None",
"Brick: Dresden ZP - Mortar: High Cement Ratio - Insulation: None",
"Brick: Potsdam - Mortar: High Cement Ratio - Insulation: None",
"Brick: Dresden ZD - Mortar: Low Cement Ratio - Insulation: None",
"Brick: Dresden ZP - Mortar: Low Cement Ratio - Insulation: None",
"Brick: Potsdam - Mortar: Low Cement Ratio - Insulation: None"]
return outliers_uninsulated_ratio_
def insulated():
"""Computes the outliers for the insulated cases"""
outliers_total_insulated = (total_insulated_4a.shape[0] - total_insulated_4a.sub(total_insulated_4a.mean())
.div(total_insulated_4a.std()).abs().lt(3).sum()) / total_insulated_4a.shape[0]
outliers_zd_high_insulated = (dresdenzd_highratio_insulated_4a.shape[0] -
dresdenzd_highratio_insulated_4a.sub(dresdenzd_highratio_insulated_4a.mean())
.div(dresdenzd_highratio_insulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzd_highratio_insulated_4a.shape[0]
outliers_zp_high_insulated = (dresdenzp_highratio_insulated_4a.shape[0] -
dresdenzp_highratio_insulated_4a.sub(dresdenzp_highratio_insulated_4a.mean())
.div(dresdenzp_highratio_insulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzp_highratio_insulated_4a.shape[0]
outliers_pd_high_insulated = (postdam_highratio_insulated_4a.shape[0] -
postdam_highratio_insulated_4a.sub(postdam_highratio_insulated_4a.mean())
.div(postdam_highratio_insulated_4a.std()).abs().lt(3).sum()) \
/ postdam_highratio_insulated_4a.shape[0]
outliers_zd_low_insulated = (dresdenzd_lowratio_insulated_4a.shape[0] -
dresdenzd_lowratio_insulated_4a.sub(dresdenzd_lowratio_insulated_4a.mean())
.div(dresdenzd_lowratio_insulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzd_lowratio_insulated_4a.shape[0]
outliers_zp_low_insulated = (dresdenzp_lowratio_insulated_4a.shape[0] -
dresdenzp_lowratio_insulated_4a.sub(dresdenzp_lowratio_insulated_4a.mean())
.div(dresdenzp_lowratio_insulated_4a.std()).abs().lt(3).sum()) \
/ dresdenzp_lowratio_insulated_4a.shape[0]
outliers_pd_low_insulated = (postdam_lowratio_insulated_4a.shape[0] -
postdam_lowratio_insulated_4a.sub(postdam_lowratio_insulated_4a.mean())
.div(postdam_lowratio_insulated_4a.std()).abs().lt(3).sum()) \
/ postdam_lowratio_insulated_4a.shape[0]
std2_insulated_ratio_ = pd.concat([outliers_total_insulated, outliers_zd_high_insulated,
outliers_zp_high_insulated, outliers_pd_high_insulated,
outliers_zd_low_insulated, outliers_zp_low_insulated,
outliers_pd_low_insulated], axis=1)
std2_insulated_ratio_.columns = ["Brick: All - Mortar: All - Insulation: None",
"Brick: Dresden ZD - Mortar: High Cement Ratio - Insulation: Calcium Silicate",
"Brick: Dresden ZP - Mortar: High Cement Ratio - Insulation: Calcium Silicate",
"Brick: Potsdam - Mortar: High Cement Ratio - Insulation: Calcium Silicate",
"Brick: Dresden ZD - Mortar: Low Cement Ratio - Insulation: Calcium Silicate",
"Brick: Dresden ZP - Mortar: Low Cement Ratio - Insulation: Calcium Silicate",
"Brick: Potsdam - Mortar: Low Cement Ratio - Insulation: Calcium Silicate"]
return std2_insulated_ratio_
#std3_ratio(False, True)
|
mit
|
hsiaoyi0504/scikit-learn
|
sklearn/linear_model/tests/test_base.py
|
120
|
10082
|
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
    clf.fit(X, Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
|
bsd-3-clause
|
ECP-CANDLE/Benchmarks
|
common/uq_utils.py
|
1
|
46831
|
from __future__ import absolute_import
import numpy as np
from scipy.stats import pearsonr, spearmanr
from scipy import signal
from scipy.interpolate import InterpolatedUnivariateSpline
def generate_index_distribution(numTrain, numTest, numValidation, params):
""" Generates a vector of indices to partition the data for training.
NO CHECKING IS DONE: it is assumed that the data could be partitioned
in the specified blocks and that the block indices describe a coherent
partition.
Parameters
----------
numTrain : int
Number of training data points
numTest : int
Number of testing data points
numValidation : int
Number of validation data points (may be zero)
params : dictionary with parameters
Contains the keywords that control the behavior of the function
(uq_train_fr, uq_valid_fr, uq_test_fr for fraction specification,
uq_train_vec, uq_valid_vec, uq_test_vec for block list specification, and
uq_train_bks, uq_valid_bks, uq_test_bks for block number specification)
Return
----------
indexTrain : int numpy array
Indices for data in training
indexValidation : int numpy array
Indices for data in validation (if any)
indexTest : int numpy array
Indices for data in testing (if merging)
"""
if all (k in params for k in ('uq_train_fr', 'uq_valid_fr', 'uq_test_fr')):
# specification by fraction
print("Computing UQ cross-validation - Distributing by FRACTION")
return generate_index_distribution_from_fraction(numTrain, numTest, numValidation, params)
elif all (k in params for k in ('uq_train_vec', 'uq_valid_vec', 'uq_test_vec')):
# specification by block list
print("Computing UQ cross-validation - Distributing by BLOCK LIST")
return generate_index_distribution_from_block_list(numTrain, numTest, numValidation, params)
elif all (k in params for k in ('uq_train_bks', 'uq_valid_bks', 'uq_test_bks')):
# specification by block size
print("Computing UQ cross-validation - Distributing by BLOCK NUMBER")
return generate_index_distribution_from_blocks(numTrain, numTest, numValidation, params)
else:
print("ERROR !! No consistent UQ parameter specification found !! ... exiting ")
raise KeyError("No valid triplet of ('uq_train_*', 'uq_valid_*', 'uq_test_*') found. (* is any of fr, vec or bks)")
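# --- Editor's sketch (not part of the original module) ---------------------
# Illustrates the fraction-based parameter triplet consumed by
# generate_index_distribution; the sizes and fractions below are assumptions
# chosen only for demonstration.
def _example_generate_index_distribution():
    params = {'uq_train_fr': 0.7, 'uq_valid_fr': 0.2, 'uq_test_fr': 0.1}
    index_train, index_val, index_test = generate_index_distribution(
        numTrain=700, numTest=100, numValidation=200, params=params)
    print(len(index_train), len(index_val), len(index_test))  # 700 200 100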
def generate_index_distribution_from_fraction(numTrain, numTest, numValidation, params):
""" Generates a vector of indices to partition the data for training.
It checks that the fractions provided are (0, 1) and add up to 1.
Parameters
----------
numTrain : int
Number of training data points
numTest : int
Number of testing data points
numValidation : int
Number of validation data points (may be zero)
params : dictionary with parameters
Contains the keywords that control the behavior of the function
(uq_train_fr, uq_valid_fr, uq_test_fr)
Return
----------
indexTrain : int numpy array
Indices for data in training
indexValidation : int numpy array
Indices for data in validation (if any)
indexTest : int numpy array
Indices for data in testing (if merging)
"""
tol = 1e-7
# Extract required parameters
fractionTrain = params['uq_train_fr']
fractionValidation = params['uq_valid_fr']
fractionTest = params['uq_test_fr']
if (fractionTrain < 0.) or (fractionTrain > 1.):
raise ValueError('uq_train_fr is not in (0, 1) range. uq_train_fr: ', fractionTrain)
if (fractionValidation < 0.) or (fractionValidation > 1.):
raise ValueError('uq_valid_fr is not in (0, 1) range. uq_valid_fr: ', fractionValidation)
if (fractionTest < 0.) or (fractionTest > 1.):
raise ValueError('uq_test_fr is not in (0, 1) range. uq_test_fr: ', fractionTest)
fractionSum = fractionTrain + fractionValidation + fractionTest
#if (fractionSum > 1.) or (fractionSum < 1.):
if abs(fractionSum-1.) > tol:
raise ValueError('Specified UQ fractions (uq_train_fr, uq_valid_fr, uq_test_fr) do not add up to 1. No cross-validation partition is computed ! sum:', fractionSum)
# Determine data size and block size
if fractionTest > 0:
# Use all data and re-distribute the partitions
numData = numTrain + numValidation + numTest
else:
# Preserve test partition
numData = numTrain + numValidation
sizeTraining = int(np.round(numData * fractionTrain))
sizeValidation = int(np.round(numData * fractionValidation))
# Fill partition indices
# Fill train partition
Folds = np.arange(numData)
np.random.shuffle(Folds)
indexTrain = Folds[:sizeTraining]
# Fill validation partition
indexValidation = None
if fractionValidation > 0:
indexValidation = Folds[sizeTraining:sizeTraining+sizeValidation]
# Fill test partition
indexTest = None
if fractionTest > 0:
indexTest = Folds[sizeTraining+sizeValidation:]
return indexTrain, indexValidation, indexTest
def generate_index_distribution_from_blocks(numTrain, numTest, numValidation, params):
""" Generates a vector of indices to partition the data for training.
NO CHECKING IS DONE: it is assumed that the data could be partitioned
in the specified block quantities and that the block quantities describe a
coherent partition.
Parameters
----------
numTrain : int
Number of training data points
numTest : int
Number of testing data points
numValidation : int
Number of validation data points (may be zero)
params : dictionary with parameters
Contains the keywords that control the behavior of the function
(uq_train_bks, uq_valid_bks, uq_test_bks)
Return
----------
indexTrain : int numpy array
Indices for data in training
indexValidation : int numpy array
Indices for data in validation (if any)
indexTest : int numpy array
Indices for data in testing (if merging)
"""
# Extract required parameters
numBlocksTrain = params['uq_train_bks']
numBlocksValidation = params['uq_valid_bks']
numBlocksTest = params['uq_test_bks']
numBlocksTotal = numBlocksTrain + numBlocksValidation + numBlocksTest
# Determine data size and block size
if numBlocksTest > 0:
# Use all data and re-distribute the partitions
numData = numTrain + numValidation + numTest
else:
# Preserve test partition
numData = numTrain + numValidation
blockSize = (numData + numBlocksTotal // 2) // numBlocksTotal # integer division with rounding
remainder = numData - blockSize * numBlocksTotal
if remainder != 0:
print("Warning ! Requested partition does not distribute data evenly between blocks. "
"Testing (if specified) or Validation (if specified) will use different block size.")
sizeTraining = numBlocksTrain * blockSize
sizeValidation = numBlocksValidation * blockSize
# Fill partition indices
# Fill train partition
Folds = np.arange(numData)
np.random.shuffle(Folds)
indexTrain = Folds[:sizeTraining]
# Fill validation partition
indexValidation = None
if numBlocksValidation > 0:
indexValidation = Folds[sizeTraining:sizeTraining+sizeValidation]
# Fill test partition
indexTest = None
if numBlocksTest > 0:
indexTest = Folds[sizeTraining+sizeValidation:]
return indexTrain, indexValidation, indexTest
def generate_index_distribution_from_block_list(numTrain, numTest, numValidation, params):
""" Generates a vector of indices to partition the data for training.
NO CHECKING IS DONE: it is assumed that the data could be partitioned
in the specified list of blocks and that the block indices describe a
coherent partition.
Parameters
----------
numTrain : int
Number of training data points
numTest : int
Number of testing data points
numValidation : int
Number of validation data points (may be zero)
params : dictionary with parameters
Contains the keywords that control the behavior of the function
(uq_train_vec, uq_valid_vec, uq_test_vec)
Return
----------
indexTrain : int numpy array
Indices for data in training
indexValidation : int numpy array
Indices for data in validation (if any)
indexTest : int numpy array
Indices for data in testing (if merging)
"""
# Extract required parameters
blocksTrain = params['uq_train_vec']
blocksValidation = params['uq_valid_vec']
blocksTest = params['uq_test_vec']
# Determine data size and block size
numBlocksTrain = len(blocksTrain)
numBlocksValidation = len(blocksValidation)
numBlocksTest = len(blocksTest)
numBlocksTotal = numBlocksTrain + numBlocksValidation + numBlocksTest
if numBlocksTest > 0:
# Use all data and re-distribute the partitions
numData = numTrain + numValidation + numTest
else:
# Preserve test partition
numData = numTrain + numValidation
blockSize = (numData + numBlocksTotal // 2) // numBlocksTotal # integer division with rounding
remainder = numData - blockSize * numBlocksTotal
if remainder != 0:
print("Warning ! Requested partition does not distribute data evenly between blocks. "
"Last block will have different size.")
if remainder < 0:
remainder = 0
# Fill partition indices
# Fill train partition
maxSizeTrain = blockSize * numBlocksTrain + remainder
indexTrain = fill_array(blocksTrain, maxSizeTrain, numData, numBlocksTotal, blockSize)
# Fill validation partition
indexValidation = None
if numBlocksValidation > 0:
maxSizeValidation = blockSize * numBlocksValidation + remainder
indexValidation = fill_array(blocksValidation, maxSizeValidation, numData, numBlocksTotal, blockSize)
# Fill test partition
indexTest = None
if numBlocksTest > 0:
maxSizeTest = blockSize * numBlocksTest + remainder
indexTest = fill_array(blocksTest, maxSizeTest, numData, numBlocksTotal, blockSize)
return indexTrain, indexValidation, indexTest
def compute_limits(numdata, numblocks, blocksize, blockn):
""" Generates the limit of indices corresponding to a
specific block. It takes into account the non-exact
divisibility of numdata into numblocks letting the
last block to take the extra chunk.
Parameters
----------
numdata : int
Total number of data points to distribute
numblocks : int
Total number of blocks to distribute into
blocksize : int
Size of data per block
blockn : int
Index of block, from 0 to numblocks-1
Return
----------
start : int
Position to start assigning indices
end : int
One beyond position to stop assigning indices
"""
start = blockn * blocksize
end = start + blocksize
if blockn == (numblocks-1): # last block gets the extra
end = numdata
return start, end
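# --- Editor's sketch (not part of the original module) ---------------------
# Worked example: 10 data points in 3 blocks of size 3; the last block
# (index 2) absorbs the extra point, so its limits are (6, 10).
def _example_compute_limits():
    assert compute_limits(numdata=10, numblocks=3, blocksize=3, blockn=0) == (0, 3)
    assert compute_limits(numdata=10, numblocks=3, blocksize=3, blockn=2) == (6, 10)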
def fill_array(blocklist, maxsize, numdata, numblocks, blocksize):
""" Fills a new array of integers with the indices corresponding
to the specified block structure.
Parameters
----------
blocklist : list
        List of integers describing the block indices that
go into the array
maxsize : int
Maximum possible length for the partition (the size of the
common block size plus the remainder, if any).
numdata : int
Total number of data points to distribute
numblocks : int
Total number of blocks to distribute into
blocksize : int
Size of data per block
Return
----------
indexArray : int numpy array
Indices for specific data partition. Resizes the array
to the correct length.
"""
    indexArray = np.zeros(maxsize, dtype=int)
offset = 0
for i in blocklist:
start, end = compute_limits(numdata, numblocks, blocksize, i)
length = end - start
indexArray[offset:offset+length] = np.arange(start, end)
offset += length
return indexArray[:offset]
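# --- Editor's sketch (not part of the original module) ---------------------
# Worked example: blocks 0 and 2 of a 10-point / 3-block layout yield indices
# [0, 1, 2] and [6, 7, 8, 9]; block 2 keeps the remainder point.
def _example_fill_array():
    idx = fill_array(blocklist=[0, 2], maxsize=7, numdata=10,
                     numblocks=3, blocksize=3)
    print(idx)  # expected: [0 1 2 6 7 8 9]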
###### UTILS for COMPUTATION OF EMPIRICAL CALIBRATION
def compute_statistics_homoscedastic(df_data,
col_true=0,
col_pred=6,
col_std_pred=7,
):
""" Extracts ground truth, mean predition, error and
standard deviation of prediction from inference
data frame. The latter includes the statistics
over all the inference realizations.
Parameters
----------
df_data : pandas data frame
Data frame generated by current CANDLE inference
experiments. Indices are hard coded to agree with
current CANDLE version. (The inference file usually
has the name: <model>_pred.tsv).
col_true : integer
Index of the column in the data frame where the true
value is stored (Default: 0, index in current CANDLE format).
col_pred : integer
Index of the column in the data frame where the predicted
value is stored (Default: 6, index in current CANDLE format).
col_std_pred : integer
Index of the column in the data frame where the standard
deviation of the predicted values is stored (Default: 7,
index in current CANDLE format).
Return
----------
Ytrue : numpy array
Array with true (observed) values
Ypred : numpy array
Array with predicted values.
yerror : numpy array
Array with errors computed (observed - predicted).
sigma : numpy array
Array with standard deviations learned with deep learning
model. For homoscedastic inference this corresponds to the
std value computed from prediction (and is equal to the
following returned variable).
Ypred_std : numpy array
Array with standard deviations computed from regular
(homoscedastic) inference.
pred_name : string
        Name of data column or quantity predicted (as extracted
from the data frame using the col_true index).
"""
Ytrue = df_data.iloc[:,col_true].values
print('Ytrue shape: ', Ytrue.shape)
pred_name = df_data.columns[col_true]
Ypred = df_data.iloc[:,col_pred].values
print('Ypred shape: ', Ypred.shape)
Ypred_std = df_data.iloc[:,col_std_pred].values
print('Ypred_std shape: ', Ypred_std.shape)
yerror = Ytrue - Ypred
print('yerror shape: ', yerror.shape)
sigma = Ypred_std # std
MSE = np.mean((Ytrue - Ypred)**2)
print('MSE: ', MSE)
MSE_STD = np.std((Ytrue - Ypred)**2)
print('MSE_STD: ', MSE_STD)
# p-value 'not entirely reliable, reasonable for datasets > 500'
spearman_cc, pval = spearmanr(Ytrue, Ypred)
print('Spearman CC: %f, p-value: %e' % (spearman_cc, pval))
return Ytrue, Ypred, yerror, sigma, Ypred_std, pred_name
def compute_statistics_homoscedastic_all(df_data,
col_true=4,
col_pred_start=6
):
""" Extracts ground truth, mean predition, error and
standard deviation of prediction from inference
data frame. The latter includes all the individual
inference realizations.
Parameters
----------
df_data : pandas data frame
Data frame generated by current CANDLE inference
experiments. Indices are hard coded to agree with
current CANDLE version. (The inference file usually
has the name: <model>.predicted_INFER.tsv).
col_true : integer
Index of the column in the data frame where the true
value is stored (Default: 4, index in current HOM format).
col_pred_start : integer
Index of the column in the data frame where the first predicted
value is stored. All the predicted values during inference
are stored (Default: 6 index, in current HOM format).
Return
----------
Ytrue : numpy array
Array with true (observed) values
Ypred : numpy array
Array with predicted values.
yerror : numpy array
Array with errors computed (observed - predicted).
sigma : numpy array
Array with standard deviations learned with deep learning
model. For homoscedastic inference this corresponds to the
std value computed from prediction (and is equal to the
following returned variable).
Ypred_std : numpy array
Array with standard deviations computed from regular
(homoscedastic) inference.
pred_name : string
        Name of data column or quantity predicted (as extracted
from the data frame using the col_true index).
"""
Ytrue = df_data.iloc[:,col_true].values
print('Ytrue shape: ', Ytrue.shape)
pred_name = df_data.columns[col_true]
Ypred_mean_ = np.mean(df_data.iloc[:,col_pred_start:], axis=1)
Ypred_mean = Ypred_mean_.values
print('Ypred_mean shape: ', Ypred_mean.shape)
Ypred_std_ = np.std(df_data.iloc[:,col_pred_start:], axis=1)
Ypred_std = Ypred_std_.values
print('Ypred_std shape: ', Ypred_std.shape)
yerror = Ytrue - Ypred_mean
print('yerror shape: ', yerror.shape)
sigma = Ypred_std # std
MSE = np.mean((Ytrue - Ypred_mean)**2)
print('MSE: ', MSE)
MSE_STD = np.std((Ytrue - Ypred_mean)**2)
print('MSE_STD: ', MSE_STD)
# p-value 'not entirely reliable, reasonable for datasets > 500'
spearman_cc, pval = spearmanr(Ytrue, Ypred_mean)
print('Spearman CC: %f, p-value: %e' % (spearman_cc, pval))
return Ytrue, Ypred_mean, yerror, sigma, Ypred_std, pred_name
def compute_statistics_heteroscedastic(df_data,
col_true=4,
col_pred_start=6,
col_std_pred_start=7,
):
""" Extracts ground truth, mean predition, error, standard
deviation of prediction and predicted (learned) standard
deviation from inference data frame. The latter includes
all the individual inference realizations.
Parameters
----------
df_data : pandas data frame
Data frame generated by current heteroscedastic inference
experiments. Indices are hard coded to agree with
current version. (The inference file usually
has the name: <model>.predicted_INFER_HET.tsv).
col_true : integer
Index of the column in the data frame where the true
value is stored (Default: 4, index in current HET format).
col_pred_start : integer
Index of the column in the data frame where the first predicted
value is stored. All the predicted values during inference
are stored and are interspaced with standard deviation
predictions (Default: 6 index, step 2, in current HET format).
col_std_pred_start : integer
Index of the column in the data frame where the first predicted
standard deviation value is stored. All the predicted values
during inference are stored and are interspaced with predictions
(Default: 7 index, step 2, in current HET format).
Return
----------
Ytrue : numpy array
Array with true (observed) values
Ypred : numpy array
Array with predicted values.
yerror : numpy array
Array with errors computed (observed - predicted).
sigma : numpy array
Array with standard deviations learned with deep learning
model. For homoscedastic inference this corresponds to the
std value computed from prediction (and is equal to the
following returned variable).
Ypred_std : numpy array
Array with standard deviations computed from regular
(homoscedastic) inference.
pred_name : string
        Name of data column or quantity predicted (as extracted
from the data frame using the col_true index).
"""
Ytrue = df_data.iloc[:,col_true].values
print('Ytrue shape: ', Ytrue.shape)
pred_name = df_data.columns[col_true]
Ypred_mean_ = np.mean(df_data.iloc[:,col_pred_start::2], axis=1)
Ypred_mean = Ypred_mean_.values
print('Ypred shape: ', Ypred_mean.shape)
Ypred_std_ = np.std(df_data.iloc[:,col_pred_start::2], axis=1)
Ypred_std = Ypred_std_.values
print('Ypred_std shape: ', Ypred_std.shape)
yerror = Ytrue - Ypred_mean
print('yerror shape: ', yerror.shape)
s_ = df_data.iloc[:,col_std_pred_start::2]
s_mean = np.mean(s_, axis=1)
var = np.exp(s_mean.values) # variance
sigma = np.sqrt(var) # std
print('sigma shape: ', sigma.shape)
MSE = np.mean((Ytrue - Ypred_mean)**2)
print('MSE: ', MSE)
MSE_STD = np.std((Ytrue - Ypred_mean)**2)
print('MSE_STD: ', MSE_STD)
# p-value 'not entirely reliable, reasonable for datasets > 500'
spearman_cc, pval = spearmanr(Ytrue, Ypred_mean)
print('Spearman CC: %f, p-value: %e' % (spearman_cc, pval))
return Ytrue, Ypred_mean, yerror, sigma, Ypred_std, pred_name
def compute_statistics_quantile(df_data,
sigma_divisor=2.56,
col_true=4,
col_pred_start=6
):
""" Extracts ground truth, 50th percentile mean predition,
low percentile and high percentile mean prediction
(usually 10th percentile and 90th percentile respectively),
error (using 50th percentile), standard deviation of
prediction (using 50th percentile) and predicted (learned)
standard deviation from interdecile range in inference data frame.
The latter includes all the individual inference realizations.
Parameters
----------
df_data : pandas data frame
Data frame generated by current quantile inference
experiments. Indices are hard coded to agree with
current version. (The inference file usually
has the name: <model>.predicted_INFER_QTL.tsv).
sigma_divisor : float
        Divisor to convert from the interdecile range to the corresponding
        standard deviation for a Gaussian distribution.
        (Default: 2.56, consistent with an interdecile range computed from
the difference between the 90th and 10th percentiles).
col_true : integer
Index of the column in the data frame where the true
value is stored (Default: 4, index in current QTL format).
col_pred_start : integer
Index of the column in the data frame where the first predicted
value is stored. All the predicted values during inference
are stored and are interspaced with other percentile
predictions (Default: 6 index, step 3, in current QTL format).
Return
----------
Ytrue : numpy array
Array with true (observed) values
Ypred : numpy array
Array with predicted values (based on the 50th percentile).
yerror : numpy array
Array with errors computed (observed - predicted).
sigma : numpy array
Array with standard deviations learned with deep learning
model. This corresponds to the interdecile range divided
by the sigma divisor.
Ypred_std : numpy array
Array with standard deviations computed from regular
(homoscedastic) inference.
pred_name : string
        Name of data column or quantity predicted (as extracted
from the data frame using the col_true index).
Ypred_Lp_mean : numpy array
Array with predicted values of the lower percentile
(usually the 10th percentile).
Ypred_Hp_mean : numpy array
Array with predicted values of the higher percentile
(usually the 90th percentile).
"""
Ytrue = df_data.iloc[:,col_true].values
print('Ytrue shape: ', Ytrue.shape)
pred_name = df_data.columns[col_true]
Ypred_50q_mean = np.mean(df_data.iloc[:,col_pred_start::3], axis=1)
Ypred_mean = Ypred_50q_mean.values
print('Ypred shape: ', Ypred_mean.shape)
Ypred_Lp_mean_ = np.mean(df_data.iloc[:,col_pred_start+1::3], axis=1)
Ypred_Hp_mean_ = np.mean(df_data.iloc[:,col_pred_start+2::3], axis=1)
Ypred_Lp_mean = Ypred_Lp_mean_.values
Ypred_Hp_mean = Ypred_Hp_mean_.values
interdecile_range = Ypred_Hp_mean - Ypred_Lp_mean
sigma = interdecile_range / sigma_divisor
print('sigma shape: ', sigma.shape)
yerror = Ytrue - Ypred_mean
print('yerror shape: ', yerror.shape)
Ypred_std_ = np.std(df_data.iloc[:,col_pred_start::3], axis=1)
Ypred_std = Ypred_std_.values
print('Ypred_std shape: ', Ypred_std.shape)
MSE = np.mean((Ytrue - Ypred_mean)**2)
print('MSE: ', MSE)
MSE_STD = np.std((Ytrue - Ypred_mean)**2)
print('MSE_STD: ', MSE_STD)
# p-value 'not entirely reliable, reasonable for datasets > 500'
spearman_cc, pval = spearmanr(Ytrue, Ypred_mean)
print('Spearman CC: %f, p-value: %e' % (spearman_cc, pval))
return Ytrue, Ypred_mean, yerror, sigma, Ypred_std, pred_name, Ypred_Lp_mean, Ypred_Hp_mean
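# --- Editor's sketch (not part of the original module) ---------------------
# The default sigma_divisor of 2.56 follows from the Gaussian interdecile
# range: Q90 - Q10 = 2 * Phi^{-1}(0.9) * sigma ~= 2.563 * sigma.
def _interdecile_sigma_divisor():
    from scipy.stats import norm
    return 2.0 * norm.ppf(0.9)  # ~2.5631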
def split_data_for_empirical_calibration(Ytrue, Ypred, sigma, cal_split=0.8):
""" Extracts a portion of the arrays provided for the computation
of the calibration and reserves the remainder portion
for testing.
Parameters
----------
Ytrue : numpy array
Array with true (observed) values
Ypred : numpy array
Array with predicted values.
sigma : numpy array
Array with standard deviations learned with deep learning
model (or std value computed from prediction if homoscedastic
inference).
cal_split : float
Split of data to use for estimating the calibration relationship.
It is assumed that it will be a value in (0, 1).
(Default: use 80% of predictions to generate empirical
calibration).
Return
----------
index_perm_total : numpy array
Random permutation of the array indices. The first 'num_cal'
of the indices correspond to the samples that are used for
calibration, while the remainder are the samples reserved
for calibration testing.
pSigma_cal : numpy array
Part of the input sigma array to use for calibration.
pSigma_test : numpy array
Part of the input sigma array to reserve for testing.
pPred_cal : numpy array
Part of the input Ypred array to use for calibration.
pPred_test : numpy array
Part of the input Ypred array to reserve for testing.
true_cal : numpy array
Part of the input Ytrue array to use for calibration.
true_test : numpy array
Part of the input Ytrue array to reserve for testing.
"""
# shuffle data for calibration
num_pred_total = sigma.shape[0]
num_cal = int(num_pred_total * cal_split)
index_perm_total = np.random.permutation(range(num_pred_total))
# Permute data
pSigma_perm_all = sigma[index_perm_total]
pPred_perm_all = Ypred[index_perm_total]
true_perm_all = Ytrue[index_perm_total]
# Split in calibration and testing
pSigma_cal = pSigma_perm_all[:num_cal]
pSigma_test = pSigma_perm_all[num_cal:]
pPred_cal = pPred_perm_all[:num_cal]
pPred_test = pPred_perm_all[num_cal:]
true_cal = true_perm_all[:num_cal]
true_test = true_perm_all[num_cal:]
print('Size of calibration set: ', true_cal.shape)
print('Size of test set: ', true_test.shape)
return index_perm_total, pSigma_cal, pSigma_test, pPred_cal, pPred_test, true_cal, true_test
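# Hedged usage sketch (added for illustration; not part of the original
# pipeline). It shows how the split above is typically driven with arrays like
# the ones returned by the statistics routine: the synthetic Ytrue / Ypred /
# sigma values and the 80/20 split below are assumptions for demonstration only.
def _example_split_for_calibration():
    rng = np.random.RandomState(0)
    Ytrue = rng.normal(size=1000)
    sigma = np.abs(rng.normal(scale=0.5, size=1000)) + 0.05   # per-sample std estimates
    Ypred = Ytrue + sigma * rng.normal(size=1000)             # noisy predictions
    (index_perm, pSigma_cal, pSigma_test,
     pPred_cal, pPred_test,
     true_cal, true_test) = split_data_for_empirical_calibration(Ytrue, Ypred, sigma,
                                                                 cal_split=0.8)
    return pSigma_cal, pPred_cal, true_cal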
def compute_empirical_calibration(pSigma_cal, pPred_cal, true_cal, bins, coverage_percentile):
""" Use the arrays provided to estimate an empirical mapping
between standard deviation and absolute value of error,
both of which have been observed during inference. Since
most of the times the raw statistics per bin are very noisy,
a smoothing step (based on scipy's savgol filter) is performed.
Parameters
----------
pSigma_cal : numpy array
Part of the standard deviations array to use for calibration.
pPred_cal : numpy array
Part of the predictions array to use for calibration.
true_cal : numpy array
Part of the true (observed) values array to use for calibration.
bins : int
Number of bins to split the range of standard deviations
included in pSigma_cal array.
coverage_percentile : float
Value to use for estimating coverage when evaluating the percentiles
of the observed absolute value of errors.
Return
----------
mean_sigma : numpy array
Array with the mean standard deviations computed per bin.
min_sigma : numpy array
Array with the minimum standard deviations computed per bin.
max_sigma : numpy array
Array with the maximum standard deviations computed per bin.
error_thresholds : numpy array
Thresholds of the errors computed to attain a certain
error coverage per bin.
err_err : numpy array
Error bars in errors (one standard deviation for a binomial
distribution estimated by bin vs. the other bins) for the
calibration error.
error_thresholds_smooth : numpy array
Thresholds of the errors computed to attain a certain
error coverage per bin after a smoothing operation is applied
to the frequently noisy bin-based estimations.
sigma_start_index : non-negative integer
Index in the mean_sigma array that defines the start of
the valid empirical calibration interval (i.e. index to
the smallest std for which a meaningful error mapping
is obtained).
sigma_end_index : non-negative integer
Index in the mean_sigma array that defines the end of
the valid empirical calibration interval (i.e. index to
the largest std for which a meaningful error mapping
is obtained).
s_interpolate : scipy.interpolate python object
A python object from scipy.interpolate that computes a
univariate spline (InterpolatedUnivariateSpline) constructed
to express the mapping from standard deviation to error. This
spline is generated during the computational empirical
calibration procedure.
"""
index_sigma_cal = np.argsort(pSigma_cal)
pSigma_cal_ordered_ = pSigma_cal[index_sigma_cal]
Er_vect_cal_ = np.abs(true_cal - pPred_cal)
Er_vect_cal_orderedSigma_ = Er_vect_cal_[index_sigma_cal]
minL_sigma = np.min(pSigma_cal_ordered_)
maxL_sigma = np.max(pSigma_cal_ordered_)
print('Complete Sigma range --> Min: %f, Max: %f' % (minL_sigma, maxL_sigma))
# Bin statistics for error and sigma
mean_sigma, min_sigma, max_sigma, error_thresholds, err_err = bining_for_calibration(pSigma_cal_ordered_,
minL_sigma,
maxL_sigma,
Er_vect_cal_orderedSigma_,
bins,
coverage_percentile)
# smooth error function
#scipy.signal.savgol_filter(x, window_length, polyorder,
#deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0)
#error_thresholds_smooth = signal.savgol_filter(error_thresholds, 5, 1)
error_thresholds_smooth = signal.savgol_filter(error_thresholds, 5, 1, mode='nearest')
# Build Interpolant over smooth plot (this will become the calibration function)
s_interpolate = InterpolatedUnivariateSpline(mean_sigma, error_thresholds_smooth)
# Determine limits of calibration (i.e. monotonicity range)
sigma_start_index, sigma_end_index = computation_of_valid_calibration_interval(error_thresholds, error_thresholds_smooth, err_err)
print('Range of valid sigma: %.6f --> %.6f' % (mean_sigma[sigma_start_index], mean_sigma[sigma_end_index]))
return mean_sigma, min_sigma, max_sigma, error_thresholds, err_err, error_thresholds_smooth, sigma_start_index, sigma_end_index, s_interpolate
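# Hedged usage sketch (added for illustration only). It runs the empirical
# calibration on a synthetic calibration split; the bin count (20) and the
# coverage percentile (95) are assumptions chosen for the demonstration, not
# values taken from the original experiments.
def _example_compute_calibration():
    rng = np.random.RandomState(1)
    pSigma_cal = np.abs(rng.normal(scale=0.5, size=2000)) + 0.05
    true_cal = rng.normal(size=2000)
    pPred_cal = true_cal + pSigma_cal * rng.normal(size=2000)
    (mean_sigma, _, _, error_thresholds, err_err, error_thresholds_smooth,
     i_start, i_end, s_interpolate) = compute_empirical_calibration(
        pSigma_cal, pPred_cal, true_cal, bins=20, coverage_percentile=95.)
    # The spline maps an observed std to a (conservative) absolute-error estimate.
    return s_interpolate(mean_sigma[i_start:i_end + 1])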
def bining_for_calibration(pSigma_cal_ordered_, minL_sigma,
maxL_sigma, Er_vect_cal_orderedSigma_,
bins, coverage_percentile):
""" Bin the values of the standard deviations observed during
inference and estimate a specified coverage percentile
in the absolute error (observed during inference as well).
Bins that have less than 50 samples are merged until they
surpass this threshold.
Parameters
----------
pSigma_cal_ordered_ : numpy array
Array of standard deviations ordered in ascending way.
minL_sigma : float
Minimum value of standard deviations included in
pSigma_cal_ordered_ array.
maxL_sigma : float
Maximum value of standard deviations included in
pSigma_cal_ordered_ array.
Er_vect_cal_orderedSigma_ : numpy array
Array of absolute values of errors corresponding to
the array of ordered standard deviations.
bins : int
Number of bins to split the range of standard deviations
included in pSigma_cal_ordered_ array.
coverage_percentile : float
Value to use for estimating coverage when evaluating the percentiles
of the observed absolute value of errors.
Return
----------
mean_sigma : numpy array
Array with the mean standard deviations computed per bin.
min_sigma : numpy array
Array with the minimum standard deviations computed per bin.
max_sigma : numpy array
Array with the maximum standard deviations computed per bin.
error_thresholds : numpy array
Thresholds of the errors computed to attain a certain
error coverage per bin.
err_err : numpy array
Error bars in errors (one standard deviation for a binomial
distribution estimated by bin vs. the other bins) for the
calibration error.
"""
#thresholds = np.logspace(np.log10(minL_sigma), np.log10(maxL_sigma), num=bins)
thresholds = np.linspace(minL_sigma, maxL_sigma, num=bins)
classes = np.digitize(pSigma_cal_ordered_, thresholds)
Nbin = np.zeros(bins+1)
for i in range(bins+1):
indices = (classes == i)
Nbin[i] = indices.sum()
# Repair bins
new_thresholds_l = []
new_nbins_l = []
sumN = 0
for i in range(Nbin.shape[0]):
sumN += Nbin[i]
if sumN > 50:
if i > (thresholds.shape[0] - 1):
new_thresholds_l.append(thresholds[-1])
else:
new_thresholds_l.append(thresholds[i])
new_nbins_l.append(sumN)
sumN = 0
new_thresholds = np.array(new_thresholds_l)
new_nbins = np.array(new_nbins_l)
new_thresholds[-1] = thresholds[-1]
new_nbins[-1] += sumN
#
classes = np.digitize(pSigma_cal_ordered_, new_thresholds[:-1])
error_thresholds = -1. * np.ones(new_nbins.shape[0])
mean_sigma = -1. * np.ones(new_nbins.shape[0])
min_sigma = -1. * np.ones(new_nbins.shape[0])
max_sigma = -1. * np.ones(new_nbins.shape[0])
err_err = -1. * np.ones(new_nbins.shape[0])
Ncal = pSigma_cal_ordered_.shape[0]
for i in range(error_thresholds.shape[0]):
indices = (classes == i)
n_aux = indices.sum()
assert n_aux == new_nbins[i]
print('Points in bin %d: %d' % (i, n_aux))
mean_sigma[i] = np.mean(pSigma_cal_ordered_[indices])
min_sigma[i] = np.min(pSigma_cal_ordered_[indices])
max_sigma[i] = np.max(pSigma_cal_ordered_[indices])
error_thresholds[i] = np.percentile(Er_vect_cal_orderedSigma_[indices], coverage_percentile)
err_err[i] = np.sqrt(new_nbins[i] * (Ncal - new_nbins[i])) / Ncal * error_thresholds[i]
return mean_sigma, min_sigma, max_sigma, error_thresholds, err_err
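# Hedged mini-example (added for illustration only). The binning above boils
# down to np.digitize plus a per-bin percentile of the absolute errors; this
# tiny standalone sketch shows that idea on ten ordered sigmas and two bins.
def _example_binning_idea():
    sigmas = np.linspace(0.1, 1.0, 10)            # already ordered
    abs_err = 2.0 * sigmas                         # errors perfectly tied to sigma
    edges = np.linspace(sigmas.min(), sigmas.max(), num=2)
    classes = np.digitize(sigmas, edges)
    # 90th-percentile error threshold per (non-empty) bin
    return [np.percentile(abs_err[classes == c], 90.) for c in np.unique(classes)]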
def computation_of_valid_calibration_interval(error_thresholds, error_thresholds_smooth, err_err):
""" Function that estimates the empirical range in which a
monotonic relation is observed between standard deviation
and coverage of absolute value of error. Since the
statistics computed per bin are relatively noisy, the
application of a greedy criterion (e.g. guarantee a
monotonically increasing relationship) does not yield
good results. Therefore, a softer version is constructed
based on the satisfaction of certain criteria depending
on: the values of the error coverage computed per bin,
a smoothed version of them, and the associated error
estimated (based on one standard deviation for a binomial
distribution estimated by bin vs. the other bins).
A minimal validation requiring the end index to be
larger than the starting index is performed before
the function returns.
Current criteria:
- the smoothed errors are inside the error bars AND
they are almost increasing (a small tolerance is
allowed, so a small wobbliness in the smoothed
values is permitted).
OR
- the raw values for the bins are increasing (within a small
tolerance), AND the smoothed value is greater than the
raw value.
OR
- the current smoothed value is greater than the previous AND
the smoothed value for the next bin is inside the error
bars.
Parameters
----------
error_thresholds : numpy array
Thresholds of the errors computed to attain a certain
error coverage per bin.
error_thresholds_smooth : numpy array
Thresholds of the errors computed to attain a certain
error coverage per bin after a smoothing operation is applied
to the frequently noisy bin-based estimations.
err_err : numpy array
Error bars in errors (one standard deviation for a binomial
distribution estimated by bin vs. the other bins) for the
calibration error.
Return
----------
sigma_start_index : non-negative integer
Index estimated in the mean_sigma array corresponding to
the value that defines the start of the valid empirical
calibration interval (i.e. index to the smallest std for
which a meaningful error mapping is obtained, according
to the criteria explained before).
sigma_end_index : non-negative integer
Index estimated in the mean_sigma array corresponding to
the value that defines the end of the valid empirical
calibration interval (i.e. index to the largest std for
which a meaningful error mapping is obtained, according
to the criteria explained before).
"""
# Computation of the calibration interval
limitH = error_thresholds + err_err
limitL = error_thresholds - err_err
# search for starting point
for i in range(err_err.shape[0]):
if ((error_thresholds_smooth[i] >= limitL[i]) and
(error_thresholds_smooth[i] <= limitH[i])): # Ask if the current is in the interval
sigma_start_index = i
break
sigma_end_index = sigma_start_index - 1
restart = max(1, sigma_start_index)
for i in range(restart, err_err.shape[0]-1):
if (((error_thresholds_smooth[i] >= limitL[i]) and
(error_thresholds_smooth[i] <= limitH[i]) and
((error_thresholds_smooth[i] * 1.005 > error_thresholds_smooth[i-1]) or
((error_thresholds[i] * 1.01 > error_thresholds[i-1]) and
(error_thresholds_smooth[i] > error_thresholds[i])))) # Ask if the current is in the interval with slightly increasing trend
or # Ask if the current is greater than the previous and the next is in the interval
((error_thresholds_smooth[i] > error_thresholds_smooth[i-1]) and
((error_thresholds_smooth[i+1] >= limitL[i+1]) and
(error_thresholds_smooth[i+1] <= limitH[i+1])))):
sigma_end_index = i
else: # Finalize search for monotonic range
if (sigma_end_index - sigma_start_index) > 4:
break
else: # Reset indices
sigma_start_index = i + 1
sigma_end_index = i
print('Range of valid sigma indices (inclusive): %d --> %d' % (sigma_start_index, sigma_end_index))
assert (sigma_end_index > sigma_start_index)
return sigma_start_index, sigma_end_index
def applying_calibration(pSigma_test, pPred_test, true_test, s_interpolate, minL_sigma_auto, maxL_sigma_auto):
""" Use the empirical mapping between standard deviation and
absolute value of error estimated during calibration (i.e.
apply the univariate spline computed) to estimate the error
for the part of the standard deviation array that was reserved
for testing the empirical calibration. The resulting error array
(yp_test) should overestimate the true observed error (eabs_red).
All the computations are restricted to the valid calibration
interval: [minL_sigma_auto, maxL_sigma_auto].
Parameters
----------
pSigma_test : numpy array
Part of the standard deviations array to use for calibration testing.
pPred_test : numpy array
Part of the predictions array to use for calibration testing.
true_test : numpy array
Part of the true (observed) values array to use for calibration testing.
s_interpolate : scipy.interpolate python object
A python object from scipy.interpolate that computes a
univariate spline (InterpolatedUnivariateSpline) expressing
the mapping from standard deviation to error. This
spline is generated during the computational empirical
calibration procedure.
minL_sigma_auto : float
Starting value of the valid empirical calibration interval
(i.e. smallest std for which a meaningful error mapping
is obtained).
maxL_sigma_auto : float
Ending value of the valid empirical calibration interval
(i.e. largest std for which a meaningful error mapping
is obtained).
Return
----------
index_sigma_range_test : numpy array
Indices of the pSigma_test array that are included in the
valid calibration interval, given by:
[minL_sigma_auto, maxL_sigma_auto].
xp_test : numpy array
Array with the mean standard deviations in the calibration
testing array.
yp_test : numpy array
Mapping of the given standard deviation to error computed
from the interpolation spline constructed by empirical
calibration.
eabs_red : numpy array
Array with the observed absolute errors in the part of the testing
array for which the observed standard deviations are in the
valid interval of calibration.
"""
# Filter to appropriate range
index_sigma_range_test = (pSigma_test >= minL_sigma_auto) & (pSigma_test < maxL_sigma_auto)
xp_test = pSigma_test[index_sigma_range_test]
yp_test = s_interpolate(xp_test)
Er_vect_ = true_test - pPred_test
eabs_ = np.abs(Er_vect_)
eabs_red = eabs_[index_sigma_range_test]
return index_sigma_range_test, xp_test, yp_test, eabs_red
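# Hedged usage sketch (added for illustration only). The spline and the sigma
# limits are assumed to come from compute_empirical_calibration above; the
# test arrays are the ones held out by split_data_for_empirical_calibration.
def _example_apply_calibration(pSigma_test, pPred_test, true_test,
                               s_interpolate, minL_sigma_auto, maxL_sigma_auto):
    _, xp_test, yp_test, eabs_red = applying_calibration(
        pSigma_test, pPred_test, true_test,
        s_interpolate, minL_sigma_auto, maxL_sigma_auto)
    # yp_test should (mostly) dominate eabs_red if the calibration is conservative.
    return xp_test, yp_test, eabs_red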
def overprediction_check(yp_test, eabs_red):
""" Compute the percentage of overestimated absoulte error
predictions for the arrays reserved for calibration testing
and whose corresponding standard deviations are included
in the valid calibration interval.
Parameters
----------
yp_test : numpy array
Mapping of the standard deviation to error computed
from the interpolation spline constructed by empirical
calibration.
eabs_red : numpy array
Array with the observed absolute errors in the part of the testing
array for which the observed standard deviations are in the
valid interval of calibration.
"""
over_pred_error_index = (yp_test >= eabs_red)
percentage_over_predicted = (over_pred_error_index.sum() / yp_test.shape[0])
print("percentage over predicted: ", percentage_over_predicted)
|
mit
|
elijah513/scikit-learn
|
examples/linear_model/plot_bayesian_ridge.py
|
248
|
2588
|
"""
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
|
bsd-3-clause
|
rth/PyKrige
|
pykrige/ok3d.py
|
1
|
35990
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__doc__ = """
PyKrige
=======
Code by Benjamin S. Murphy and the PyKrige Developers
[email protected]
Summary
-------
Contains class OrdinaryKriging3D.
References
----------
.. [1] P.K. Kitanidis, Introduction to Geostatistics: Applications in
Hydrogeology, (Cambridge University Press, 1997) 272 p.
Copyright (c) 2015-2018, PyKrige Developers
"""
import numpy as np
import scipy.linalg
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from . import variogram_models
from . import core
from .core import _adjust_for_anisotropy, _initialize_variogram_model, \
_make_variogram_parameter_list, _find_statistics
import warnings
class OrdinaryKriging3D:
"""Three-dimensional ordinary kriging
Parameters
----------
x : array_like
X-coordinates of data points.
y : array_like
Y-coordinates of data points.
z : array_like
Z-coordinates of data points.
val : array_like
Values at data points.
variogram_model : str, optional
Specifies which variogram model to use; may be one of the following:
linear, power, gaussian, spherical, exponential, hole-effect.
Default is linear variogram model. To utilize a custom variogram model,
specify 'custom'; you must also provide variogram_parameters and
variogram_function. Note that the hole-effect model is only technically
correct for one-dimensional problems.
variogram_parameters : list or dict, optional
Parameters that define the specified variogram model. If not provided,
parameters will be automatically calculated using a "soft" L1 norm
minimization scheme. For variogram model parameters provided in a dict,
the required dict keys vary according to the specified variogram
model: ::
linear - {'slope': slope, 'nugget': nugget}
power - {'scale': scale, 'exponent': exponent, 'nugget': nugget}
gaussian - {'sill': s, 'range': r, 'nugget': n}
OR
{'psill': p, 'range': r, 'nugget':n}
spherical - {'sill': s, 'range': r, 'nugget': n}
OR
{'psill': p, 'range': r, 'nugget':n}
exponential - {'sill': s, 'range': r, 'nugget': n}
OR
{'psill': p, 'range': r, 'nugget':n}
hole-effect - {'sill': s, 'range': r, 'nugget': n}
OR
{'psill': p, 'range': r, 'nugget':n}
Note that either the full sill or the partial sill
(psill = sill - nugget) can be specified in the dict.
For variogram model parameters provided in a list, the entries
must be as follows: ::
linear - [slope, nugget]
power - [scale, exponent, nugget]
gaussian - [sill, range, nugget]
spherical - [sill, range, nugget]
exponential - [sill, range, nugget]
hole-effect - [sill, range, nugget]
Note that the full sill (NOT the partial sill) must be specified
in the list format.
For a custom variogram model, the parameters are required, as custom
variogram models will not automatically be fit to the data.
Furthermore, the parameters must be specified in list format, in the
order in which they are used in the callable function (see
variogram_function for more information). The code does not check
that the provided list contains the appropriate number of parameters
for the custom variogram model, so an incorrect parameter list in
such a case will probably trigger an esoteric exception someplace
deep in the code.
NOTE that, while the list format expects the full sill, the code
itself works internally with the partial sill.
variogram_function : callable, optional
A callable function that must be provided if variogram_model is
specified as 'custom'. The function must take only two arguments:
first, a list of parameters for the variogram model;
second, the distances at which to calculate the variogram model.
The list provided in variogram_parameters will be passed to the
function as the first argument.
nlags : int, optional
Number of averaging bins for the semivariogram. Default is 6.
weight : boolean, optional
Flag that specifies if semivariance at smaller lags should be weighted
more heavily when automatically calculating variogram model.
The routine is currently hard-coded such that the weights are
calculated from a logistic function, so weights at small lags are ~1
and weights at the longest lags are ~0; the center of the logistic
weighting is hard-coded to be at 70% of the distance from the shortest
lag to the largest lag. Setting this parameter to True indicates that
weights will be applied. Default is False.
(Kitanidis suggests that the values at smaller lags are more
important in fitting a variogram model, so the option is provided
to enable such weighting.)
anisotropy_scaling_y : float, optional
Scalar stretching value to take into account anisotropy
in the y direction. Default is 1 (effectively no stretching).
Scaling is applied in the y direction in the rotated data frame
(i.e., after adjusting for the anisotropy_angle_x/y/z,
if anisotropy_angle_x/y/z is/are not 0).
anisotropy_scaling_z : float, optional
Scalar stretching value to take into account anisotropy
in the z direction. Default is 1 (effectively no stretching).
Scaling is applied in the z direction in the rotated data frame
(i.e., after adjusting for the anisotropy_angle_x/y/z,
if anisotropy_angle_x/y/z is/are not 0).
anisotropy_angle_x : float, optional
CCW angle (in degrees) by which to rotate coordinate system about
the x axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated.
X rotation is applied first, then y rotation, then z rotation.
Scaling is applied after rotation.
anisotropy_angle_y : float, optional
CCW angle (in degrees) by which to rotate coordinate system about
the y axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated.
X rotation is applied first, then y rotation, then z rotation.
Scaling is applied after rotation.
anisotropy_angle_z : float, optional
CCW angle (in degrees) by which to rotate coordinate system about
the z axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated.
X rotation is applied first, then y rotation, then z rotation.
Scaling is applied after rotation.
verbose : bool, optional
Enables program text output to monitor kriging process.
Default is False (off).
enable_plotting : bool, optional
Enables plotting to display variogram. Default is False (off).
References
----------
.. [1] P.K. Kitanidis, Introduction to Geostatistics: Applications in
Hydrogeology, (Cambridge University Press, 1997) 272 p.
"""
eps = 1.e-10 # Cutoff for comparison to zero
variogram_dict = {'linear': variogram_models.linear_variogram_model,
'power': variogram_models.power_variogram_model,
'gaussian': variogram_models.gaussian_variogram_model,
'spherical': variogram_models.spherical_variogram_model,
'exponential': variogram_models.exponential_variogram_model,
'hole-effect': variogram_models.hole_effect_variogram_model}
def __init__(self, x, y, z, val, variogram_model='linear',
variogram_parameters=None, variogram_function=None, nlags=6,
weight=False, anisotropy_scaling_y=1., anisotropy_scaling_z=1.,
anisotropy_angle_x=0., anisotropy_angle_y=0.,
anisotropy_angle_z=0., verbose=False, enable_plotting=False):
# Code assumes 1D input arrays. Ensures that any extraneous dimensions
# don't get in the way. Copies are created to avoid any problems with
# referencing the original passed arguments.
self.X_ORIG = \
np.atleast_1d(np.squeeze(np.array(x, copy=True, dtype=np.float64)))
self.Y_ORIG = \
np.atleast_1d(np.squeeze(np.array(y, copy=True, dtype=np.float64)))
self.Z_ORIG = \
np.atleast_1d(np.squeeze(np.array(z, copy=True, dtype=np.float64)))
self.VALUES = \
np.atleast_1d(np.squeeze(np.array(val, copy=True, dtype=np.float64)))
self.verbose = verbose
self.enable_plotting = enable_plotting
if self.enable_plotting and self.verbose:
print("Plotting Enabled\n")
self.XCENTER = (np.amax(self.X_ORIG) + np.amin(self.X_ORIG))/2.0
self.YCENTER = (np.amax(self.Y_ORIG) + np.amin(self.Y_ORIG))/2.0
self.ZCENTER = (np.amax(self.Z_ORIG) + np.amin(self.Z_ORIG))/2.0
self.anisotropy_scaling_y = anisotropy_scaling_y
self.anisotropy_scaling_z = anisotropy_scaling_z
self.anisotropy_angle_x = anisotropy_angle_x
self.anisotropy_angle_y = anisotropy_angle_y
self.anisotropy_angle_z = anisotropy_angle_z
if self.verbose:
print("Adjusting data for anisotropy...")
self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED = \
_adjust_for_anisotropy(np.vstack((self.X_ORIG, self.Y_ORIG, self.Z_ORIG)).T,
[self.XCENTER, self.YCENTER, self.ZCENTER],
[self.anisotropy_scaling_y, self.anisotropy_scaling_z],
[self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z]).T
# set up variogram model and parameters...
self.variogram_model = variogram_model
if self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != 'custom':
raise ValueError("Specified variogram model '%s' is not supported." % variogram_model)
elif self.variogram_model == 'custom':
if variogram_function is None or not callable(variogram_function):
raise ValueError("Must specify callable function for "
"custom variogram model.")
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
if self.verbose:
print("Initializing variogram model...")
vp_temp = _make_variogram_parameter_list(self.variogram_model,
variogram_parameters)
self.lags, self.semivariance, self.variogram_model_parameters = \
_initialize_variogram_model(np.vstack((self.X_ADJUSTED,
self.Y_ADJUSTED,
self.Z_ADJUSTED)).T,
self.VALUES, self.variogram_model,
vp_temp, self.variogram_function,
nlags, weight, 'euclidean')
if self.verbose:
if self.variogram_model == 'linear':
print("Using '%s' Variogram Model" % 'linear')
print("Slope:", self.variogram_model_parameters[0])
print("Nugget:", self.variogram_model_parameters[1], '\n')
elif self.variogram_model == 'power':
print("Using '%s' Variogram Model" % 'power')
print("Scale:", self.variogram_model_parameters[0])
print("Exponent:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], '\n')
elif self.variogram_model == 'custom':
print("Using Custom Variogram Model")
else:
print("Using '%s' Variogram Model" % self.variogram_model)
print("Partial Sill:", self.variogram_model_parameters[0])
print("Full Sill:", self.variogram_model_parameters[0] +
self.variogram_model_parameters[2])
print("Range:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], '\n')
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print("Calculating statistics on variogram model fit...")
self.delta, self.sigma, self.epsilon = \
_find_statistics(np.vstack((self.X_ADJUSTED,
self.Y_ADJUSTED,
self.Z_ADJUSTED)).T,
self.VALUES, self.variogram_function,
self.variogram_model_parameters, 'euclidean')
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR, '\n')
def update_variogram_model(self, variogram_model, variogram_parameters=None,
variogram_function=None, nlags=6, weight=False,
anisotropy_scaling_y=1., anisotropy_scaling_z=1.,
anisotropy_angle_x=0., anisotropy_angle_y=0.,
anisotropy_angle_z=0.):
"""Changes the variogram model and variogram parameters for
the kriging system.
Parameters
----------
variogram_model : str
May be any of the variogram models listed above.
May also be 'custom', in which case variogram_parameters and
variogram_function must be specified.
variogram_parameters : list or dict, optional
List or dict of variogram model parameters, as explained above.
If not provided, a best fit model will be calculated as
described above.
variogram_function : callable, optional
A callable function that must be provided if variogram_model is
specified as 'custom'. See above for more information.
nlags : int, optional
Number of averaging bins for the semivariogram. Default is 6.
weight : bool, optional
Flag that specifies if semivariance at smaller lags should be
weighted more heavily when automatically calculating
variogram model. See above for more information. True indicates
that weights will be applied. Default is False.
anisotropy_scaling_y : float, optional
Scalar stretching value to take into account anisotropy
in y-direction. Default is 1 (effectively no stretching).
See above for more information.
anisotropy_scaling_z : float, optional
Scalar stretching value to take into account anisotropy
in z-direction. Default is 1 (effectively no stretching).
See above for more information.
anisotropy_angle_x : float, optional
Angle (in degrees) by which to rotate coordinate system about
the x axis in order to take into account anisotropy.
Default is 0 (no rotation). See above for more information.
anisotropy_angle_y : float, optional
Angle (in degrees) by which to rotate coordinate system about
the y axis in order to take into account anisotropy.
Default is 0 (no rotation). See above for more information.
anisotropy_angle_z : float, optional
Angle (in degrees) by which to rotate coordinate system about
the z axis in order to take into account anisotropy.
Default is 0 (no rotation). See above for more information.
"""
if anisotropy_scaling_y != self.anisotropy_scaling_y or anisotropy_scaling_z != self.anisotropy_scaling_z or \
anisotropy_angle_x != self.anisotropy_angle_x or anisotropy_angle_y != self.anisotropy_angle_y or \
anisotropy_angle_z != self.anisotropy_angle_z:
if self.verbose:
print("Adjusting data for anisotropy...")
self.anisotropy_scaling_y = anisotropy_scaling_y
self.anisotropy_scaling_z = anisotropy_scaling_z
self.anisotropy_angle_x = anisotropy_angle_x
self.anisotropy_angle_y = anisotropy_angle_y
self.anisotropy_angle_z = anisotropy_angle_z
self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED = \
_adjust_for_anisotropy(np.vstack((self.X_ORIG, self.Y_ORIG, self.Z_ORIG)).T,
[self.XCENTER, self.YCENTER, self.ZCENTER],
[self.anisotropy_scaling_y, self.anisotropy_scaling_z],
[self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z]).T
self.variogram_model = variogram_model
if self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != 'custom':
raise ValueError("Specified variogram model '%s' is not supported." % variogram_model)
elif self.variogram_model == 'custom':
if variogram_function is None or not callable(variogram_function):
raise ValueError("Must specify callable function for custom variogram model.")
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
if self.verbose:
print("Updating variogram mode...")
vp_temp = _make_variogram_parameter_list(self.variogram_model,
variogram_parameters)
self.lags, self.semivariance, self.variogram_model_parameters = \
_initialize_variogram_model(np.vstack((self.X_ADJUSTED,
self.Y_ADJUSTED,
self.Z_ADJUSTED)).T,
self.VALUES, self.variogram_model,
vp_temp, self.variogram_function,
nlags, weight, 'euclidean')
if self.verbose:
if self.variogram_model == 'linear':
print("Using '%s' Variogram Model" % 'linear')
print("Slope:", self.variogram_model_parameters[0])
print("Nugget:", self.variogram_model_parameters[1], '\n')
elif self.variogram_model == 'power':
print("Using '%s' Variogram Model" % 'power')
print("Scale:", self.variogram_model_parameters[0])
print("Exponent:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], '\n')
elif self.variogram_model == 'custom':
print("Using Custom Variogram Model")
else:
print("Using '%s' Variogram Model" % self.variogram_model)
print("Partial Sill:", self.variogram_model_parameters[0])
print("Full Sill:", self.variogram_model_parameters[0] +
self.variogram_model_parameters[2])
print("Range:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], '\n')
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print("Calculating statistics on variogram model fit...")
self.delta, self.sigma, self.epsilon = \
_find_statistics(np.vstack((self.X_ADJUSTED,
self.Y_ADJUSTED,
self.Z_ADJUSTED)).T,
self.VALUES, self.variogram_function,
self.variogram_model_parameters, 'euclidean')
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR, '\n')
def display_variogram_model(self):
"""Displays variogram model with the actual binned data."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.lags, self.semivariance, 'r*')
ax.plot(self.lags,
self.variogram_function(self.variogram_model_parameters,
self.lags), 'k-')
plt.show()
def switch_verbose(self):
"""Allows user to switch code talk-back on/off. Takes no arguments."""
self.verbose = not self.verbose
def switch_plotting(self):
"""Allows user to switch plot display on/off. Takes no arguments."""
self.enable_plotting = not self.enable_plotting
def get_epsilon_residuals(self):
"""Returns the epsilon residuals for the variogram fit."""
return self.epsilon
def plot_epsilon_residuals(self):
"""Plots the epsilon residuals for the variogram fit."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(range(self.epsilon.size), self.epsilon, c='k', marker='*')
ax.axhline(y=0.0)
plt.show()
def get_statistics(self):
"""Returns the Q1, Q2, and cR statistics for the
variogram fit (in that order). No arguments.
"""
return self.Q1, self.Q2, self.cR
def print_statistics(self):
"""Prints out the Q1, Q2, and cR statistics for the variogram fit.
NOTE that ideally Q1 is close to zero, Q2 is close to 1,
and cR is as small as possible.
"""
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR)
def _get_kriging_matrix(self, n):
"""Assembles the kriging matrix."""
xyz = np.concatenate((self.X_ADJUSTED[:, np.newaxis],
self.Y_ADJUSTED[:, np.newaxis],
self.Z_ADJUSTED[:, np.newaxis]), axis=1)
d = cdist(xyz, xyz, 'euclidean')
a = np.zeros((n+1, n+1))
a[:n, :n] = - self.variogram_function(self.variogram_model_parameters, d)
np.fill_diagonal(a, 0.)
a[n, :] = 1.0
a[:, n] = 1.0
a[n, n] = 0.0
return a
def _exec_vector(self, a, bd, mask):
"""Solves the kriging system as a vectorized operation. This method
can take a lot of memory for large grids and/or large datasets."""
npt = bd.shape[0]
n = self.X_ADJUSTED.shape[0]
zero_index = None
zero_value = False
a_inv = scipy.linalg.inv(a)
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
b = np.zeros((npt, n+1, 1))
b[:, :n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
if zero_value:
b[zero_index[0], zero_index[1], 0] = 0.0
b[:, n, 0] = 1.0
if (~mask).any():
mask_b = np.repeat(mask[:, np.newaxis, np.newaxis], n+1, axis=1)
b = np.ma.array(b, mask=mask_b)
x = np.dot(a_inv, b.reshape((npt, n+1)).T).reshape((1, n+1, npt)).T
kvalues = np.sum(x[:, :n, 0] * self.VALUES, axis=1)
sigmasq = np.sum(x[:, :, 0] * -b[:, :, 0], axis=1)
return kvalues, sigmasq
def _exec_loop(self, a, bd_all, mask):
"""Solves the kriging system by looping over all specified points.
Less memory-intensive, but involves a Python-level loop."""
npt = bd_all.shape[0]
n = self.X_ADJUSTED.shape[0]
kvalues = np.zeros(npt)
sigmasq = np.zeros(npt)
a_inv = scipy.linalg.inv(a)
for j in np.nonzero(~mask)[0]: # Note that this is the same thing as range(npt) if mask is not defined,
bd = bd_all[j] # otherwise it takes the non-masked elements.
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
else:
zero_value = False
zero_index = None
b = np.zeros((n+1, 1))
b[:n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
if zero_value:
b[zero_index[0], 0] = 0.0
b[n, 0] = 1.0
x = np.dot(a_inv, b)
kvalues[j] = np.sum(x[:n, 0] * self.VALUES)
sigmasq[j] = np.sum(x[:, 0] * -b[:, 0])
return kvalues, sigmasq
def _exec_loop_moving_window(self, a_all, bd_all, mask, bd_idx):
"""Solves the kriging system by looping over all specified points.
Uses only a certain number of closest points. Not very memory intensive,
but the loop is done in pure Python.
"""
import scipy.linalg.lapack
npt = bd_all.shape[0]
n = bd_idx.shape[1]
kvalues = np.zeros(npt)
sigmasq = np.zeros(npt)
for i in np.nonzero(~mask)[0]:
b_selector = bd_idx[i]
bd = bd_all[i]
a_selector = np.concatenate((b_selector, np.array([a_all.shape[0] - 1])))
a = a_all[a_selector[:, None], a_selector]
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
else:
zero_value = False
zero_index = None
b = np.zeros((n+1, 1))
b[:n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
if zero_value:
b[zero_index[0], 0] = 0.0
b[n, 0] = 1.0
x = scipy.linalg.solve(a, b)
kvalues[i] = x[:n, 0].dot(self.VALUES[b_selector])
sigmasq[i] = - x[:, 0].dot(b[:, 0])
return kvalues, sigmasq
def execute(self, style, xpoints, ypoints, zpoints, mask=None,
backend='vectorized', n_closest_points=None):
"""Calculates a kriged grid and the associated variance.
This is now the method that performs the main kriging calculation.
Note that currently measurements (i.e., z values) are
considered 'exact'. This means that, when a specified coordinate
for interpolation is exactly the same as one of the data points,
the variogram evaluated at the point is forced to be zero.
The diagonal of the kriging matrix is also always forced
to be zero. In forcing the variogram evaluated at data points
to be zero, we are effectively saying that there is no variance
at that point (no uncertainty, so the value is 'exact').
In the future, the code may include an extra 'exact_values' boolean
flag that can be adjusted to specify whether to treat the
measurements as 'exact'. Setting the flag to false would indicate
that the variogram should not be forced to be zero at zero distance
(i.e., when evaluated at data points). Instead, the uncertainty in the
point will be equal to the nugget. This would mean that the diagonal
of the kriging matrix would be set to the nugget instead of to zero.
Parameters
----------
style : str
Specifies how to treat input kriging points.
Specifying 'grid' treats xpoints, ypoints, and zpoints as arrays of
x, y, and z coordinates that define a rectangular grid.
Specifying 'points' treats xpoints, ypoints, and zpoints as arrays
that provide coordinates at which to solve the kriging system.
Specifying 'masked' treats xpoints, ypoints, and zpoints as arrays
of x, y, and z coordinates that define a rectangular grid and uses
mask to only evaluate specific points in the grid.
xpoints : array_like, shape (N,) or (N, 1)
If style is specified as 'grid' or 'masked', x-coordinates of
LxMxN grid. If style is specified as 'points', x-coordinates of
specific points at which to solve kriging system.
ypoints : array-like, shape (M,) or (M, 1)
If style is specified as 'grid' or 'masked', y-coordinates of
LxMxN grid. If style is specified as 'points', y-coordinates of
specific points at which to solve kriging system.
Note that in this case, xpoints, ypoints, and zpoints must have the
same dimensions (i.e., L = M = N).
zpoints : array-like, shape (L,) or (L, 1)
If style is specified as 'grid' or 'masked', z-coordinates of
LxMxN grid. If style is specified as 'points', z-coordinates of
specific points at which to solve kriging system.
Note that in this case, xpoints, ypoints, and zpoints must have the
same dimensions (i.e., L = M = N).
mask : boolean array, shape (L, M, N), optional
Specifies the points in the rectangular grid defined by xpoints,
ypoints, zpoints that are to be excluded in the
kriging calculations. Must be provided if style is specified
as 'masked'. False indicates that the point should not be masked,
so the kriging system will be solved at the point.
True indicates that the point should be masked, so the kriging
system will not be solved at the point.
backend : str, optional
Specifies which approach to use in kriging. Specifying 'vectorized'
will solve the entire kriging problem at once in a
vectorized operation. This approach is faster but also can consume a
significant amount of memory for large grids and/or large datasets.
Specifying 'loop' will loop through each point at which the kriging
system is to be solved. This approach is slower but also less
memory-intensive. Default is 'vectorized'.
n_closest_points : int, optional
For kriging with a moving window, specifies the number of nearby
points to use in the calculation. This can speed up the calculation
for large datasets, but should be used with caution.
As Kitanidis notes, kriging with a moving window can produce
unexpected oddities if the variogram model is not carefully chosen.
Returns
-------
kvalues : ndarray, shape (L, M, N) or (N, 1)
Interpolated values of specified grid or at the specified set
of points. If style was specified as 'masked', kvalues will be a
numpy masked array.
sigmasq : ndarray, shape (L, M, N) or (N, 1)
Variance at specified grid points or at the specified set of points.
If style was specified as 'masked', sigmasq will be a numpy
masked array.
"""
if self.verbose:
print("Executing Ordinary Kriging...\n")
if style != 'grid' and style != 'masked' and style != 'points':
raise ValueError("style argument must be 'grid', 'points', "
"or 'masked'")
xpts = np.atleast_1d(np.squeeze(np.array(xpoints, copy=True)))
ypts = np.atleast_1d(np.squeeze(np.array(ypoints, copy=True)))
zpts = np.atleast_1d(np.squeeze(np.array(zpoints, copy=True)))
n = self.X_ADJUSTED.shape[0]
nx = xpts.size
ny = ypts.size
nz = zpts.size
a = self._get_kriging_matrix(n)
if style in ['grid', 'masked']:
if style == 'masked':
if mask is None:
raise IOError("Must specify boolean masking array when "
"style is 'masked'.")
if mask.ndim != 3:
raise ValueError("Mask is not three-dimensional.")
if mask.shape[0] != nz or mask.shape[1] != ny or mask.shape[2] != nx:
if mask.shape[0] == nx and mask.shape[2] == nz and mask.shape[1] == ny:
mask = mask.swapaxes(0, 2)
else:
raise ValueError("Mask dimensions do not match "
"specified grid dimensions.")
mask = mask.flatten()
npt = nz * ny * nx
grid_z, grid_y, grid_x = np.meshgrid(zpts, ypts, xpts, indexing='ij')
xpts = grid_x.flatten()
ypts = grid_y.flatten()
zpts = grid_z.flatten()
elif style == 'points':
if xpts.size != ypts.size or ypts.size != zpts.size:
raise ValueError("xpoints, ypoints, and zpoints must have "
"same dimensions when treated as listing "
"discrete points.")
npt = nx
else:
raise ValueError("style argument must be 'grid', "
"'points', or 'masked'")
xpts, ypts, zpts = \
_adjust_for_anisotropy(np.vstack((xpts, ypts, zpts)).T,
[self.XCENTER, self.YCENTER, self.ZCENTER],
[self.anisotropy_scaling_y, self.anisotropy_scaling_z],
[self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z]).T
if style != 'masked':
mask = np.zeros(npt, dtype='bool')
xyz_points = np.concatenate((zpts[:, np.newaxis], ypts[:, np.newaxis],
xpts[:, np.newaxis]), axis=1)
xyz_data = np.concatenate((self.Z_ADJUSTED[:, np.newaxis],
self.Y_ADJUSTED[:, np.newaxis],
self.X_ADJUSTED[:, np.newaxis]), axis=1)
bd = cdist(xyz_points, xyz_data, 'euclidean')
if n_closest_points is not None:
from scipy.spatial import cKDTree
tree = cKDTree(xyz_data)
bd, bd_idx = tree.query(xyz_points, k=n_closest_points, eps=0.0)
if backend == 'loop':
kvalues, sigmasq = \
self._exec_loop_moving_window(a, bd, mask, bd_idx)
else:
raise ValueError("Specified backend '{}' not supported "
"for moving window.".format(backend))
else:
if backend == 'vectorized':
kvalues, sigmasq = self._exec_vector(a, bd, mask)
elif backend == 'loop':
kvalues, sigmasq = self._exec_loop(a, bd, mask)
else:
raise ValueError('Specified backend {} is not supported for '
'3D ordinary kriging.'.format(backend))
if style == 'masked':
kvalues = np.ma.array(kvalues, mask=mask)
sigmasq = np.ma.array(sigmasq, mask=mask)
if style in ['masked', 'grid']:
kvalues = kvalues.reshape((nz, ny, nx))
sigmasq = sigmasq.reshape((nz, ny, nx))
return kvalues, sigmasq
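# Hedged usage sketch (an illustrative addition, not part of the PyKrige
# sources): krige a handful of random 3D points onto a small regular grid with
# the default linear variogram. Because this module uses relative imports, run
# it as `python -m pykrige.ok3d` rather than as a plain script.
if __name__ == '__main__':
    _rng = np.random.RandomState(0)
    _x = _rng.uniform(0., 10., 30)
    _y = _rng.uniform(0., 10., 30)
    _z = _rng.uniform(0., 10., 30)
    _val = _x + 0.5 * _y - 0.2 * _z + _rng.normal(scale=0.1, size=30)
    _ok3d = OrdinaryKriging3D(_x, _y, _z, _val, variogram_model='linear')
    _grid = np.linspace(0., 10., 5)
    _kvalues, _sigmasq = _ok3d.execute('grid', _grid, _grid, _grid)
    print("kriged grid shape:", _kvalues.shape)   # expected: (5, 5, 5)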
|
bsd-3-clause
|
davidthaler/Kaggle_Truly-Native
|
util.py
|
1
|
6548
|
import pandas as pd
import numpy as np
import os
import gzip
from math import log
from datetime import datetime
from sklearn.datasets import load_svmlight_file
from sklearn.preprocessing import normalize
from sklearn.preprocessing import scale
from sklearn.externals import joblib
import artifacts
import paths
# This module imports pandas and sklearn, so it can't run under pypy.
def load_sparse(feature_set_name,
n_features,
log_transform=True,
row_normalize=True,
verbose=True):
'''
Utility function to load a sparse feature set for linear models.
Features must be in LibSVM format.
Must be located at data/processed/<feature_set_name>.libsvm
Args:
feature_set_name - just the filename, w/o path or extension
n_features - probably 2**20, see sparse_features.py
log_transform - if True, transform counts with log1p
row_normalize - if True, L2 normalize rows after log transform (if used)
verbose - default True. If True, print the runtime.
Returns:
x - scipy.sparse.csr matrix of features
y - numpy 1-D array of labels
'''
start = datetime.now()
cached = os.path.join(paths.TMP, feature_set_name + '.job')
if os.path.isfile(cached):
x, y = joblib.load(cached)
else:
path = os.path.join(paths.PROCESSED, feature_set_name + '.libsvm')
x, y = load_svmlight_file(path, n_features=n_features)
joblib.dump((x, y), cached)
if log_transform:
x = x.log1p()
if row_normalize:
x = normalize(x)
finish = datetime.now()
if verbose:
print('elapsed time: %d sec.' % (finish - start).seconds)
return x, y
def load_features(feature_set_name):
'''
Utility function to load a feature set from:
paths.PROCESSED/<feature_set_name>.csv
Args:
a bare filename of the feature set without path or extension
Returns:
a Pandas data frame of features
'''
path = os.path.join(paths.PROCESSED, feature_set_name + '.csv')
out = pd.read_csv(path)
out.fillna(0, inplace=True)
return out
def get_drop_cols(df):
'''
Finds column names of any all-zero columns in a Pandas data frame.
Args:
df - a Pandas data frame of features
Returns:
a list of column names of all-zero columns
'''
drops = []
for c in df.columns:
if not df[c].any():
drops.append(c)
return drops
def load_train(as_dict):
'''
Loads full training set, including labels, as either a data frame or a dict.
Rewrites label to a python int (it comes in as numpy.int64) so that we can
use this dict with pypy, if as_dict is True.
Args:
as_dict - If True, return a dict from filenames to labels
instead of a data frame.
Returns:
the training data either as a data frame,
or as a dict from filenames to labels.
'''
tr = pd.read_csv(paths.TRAIN)
if as_dict:
# prevents numpy.int64 from getting into the dict and killing pypy
labels = [int(x) for x in tr.sponsored]
return dict(zip(tr.file, labels))
else:
return tr
def create_sample(outfile, n_pos, n_neg):
'''
Creates a dict describing a specific sample of rows and saves
it at ARTIFACTS with form {filename : label}. Rewrites label
to a python int (it comes in as numpy.int64) so that we can
use this dict with pypy.
args:
outfile - the dict is written at ARTIFACTS/<outfile>.pkl
n_pos - approximate number of positive instances in sample
n_neg - approximate number of negative instances in sample
return:
nothing, but dict is pickled at ARTIFACTS/<outfile>.pkl
'''
tr = load_train(False)
tr_pos = tr[tr.sponsored == 1]
tr_pos = tr_pos.sample(n_pos)
tr_neg = tr[tr.sponsored == 0]
tr_neg = tr_neg.sample(n_neg)
sample_df = tr_pos.append(tr_neg)
# We need this to prevent the pickled sample dict from containing a
# numpy.int64, which prevents using pypy
labels = [int(x) for x in sample_df.sponsored]
sample = dict(zip(sample_df.file, labels))
artifacts.put_artifact(sample, outfile)
def write_submission(pred, submit_id):
'''
Writes out a submission from an array of scores.
Order of scores must be that of zip_io.generate_test.
Args:
pred - numpy 1-D array or similar of scores to write.
submit_id - identifier for the submission
Writes:
A submission at paths.SUBMIT/submission_<submit_id>.csv
'''
s1 = os.path.join(paths.SUBMIT, 'submission_1.csv.gz')
result = pd.read_csv(s1, compression='gzip')
result.sponsored = pred
submission_name = 'submission_%s.csv' % str(submit_id)
submission = os.path.join(paths.SUBMIT, submission_name)
result.to_csv(submission, index=False)
def combine_dvs(dv_subs, prob_subs, clip_at, submit_id):
'''
Take lists of filenames of submissions in .csv or .csv.gz format and
combine them into one submission.
Outputs of models that output probabilities are clipped and converted
to log-odds and then averaged together.
Outputs of models that output log-odds or decision values are just averaged.
The two averaged predictions are then standard-scaled and added.
Filenames should be with extension but without path.
The files must be at BASE/submissions
Args:
prob_subs - list of str. - filenames of probability-scale submissions
dv_subs - list of str. - filenames of log-odds scale (or dv) entries
clip_at - the probabilities are clipped at [clip_at, 1 - clip_at]
submit_id - the result is written as submissions/submission_<submit_id>.csv
Writes:
A submission at paths.SUBMIT/submission_<submit_id>.csv
'''
logodds = lambda x : log(x/(1 - x))
all_odds = None
for sub_name in prob_subs:
path = os.path.join(paths.SUBMIT, sub_name)
if sub_name.endswith('.gz'):
sub = pd.read_csv(path, compression='gzip')
else:
sub = pd.read_csv(path)
pred = sub.sponsored.clip(lower = clip_at, upper = 1 - clip_at)
pred = pred.apply(logodds)
if all_odds is None:
all_odds = pred
else:
all_odds += pred
all_odds = all_odds / len(prob_subs)
all_odds = scale(all_odds)
all_dvs = None
for sub_name in dv_subs:
path = os.path.join(paths.SUBMIT, sub_name)
if sub_name.endswith('.gz'):
sub = pd.read_csv(path, compression='gzip')
else:
sub = pd.read_csv(path)
if all_dvs is None:
all_dvs = sub.sponsored
else:
all_dvs += sub.sponsored
all_dvs = all_dvs / len(dv_subs)
all_dvs = scale(all_dvs)
result = all_dvs + all_odds
write_submission(result, submit_id)
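# Hedged usage sketch (added for illustration; the filenames below are
# assumptions, not actual submissions from this project): blend two
# probability-scale submissions with one decision-value submission, clipping
# probabilities at 0.01, and write the result as submission_blend.csv.
def _example_blend():
    combine_dvs(dv_subs=['submission_svm_dv.csv'],
                prob_subs=['submission_rf.csv.gz', 'submission_gbm.csv'],
                clip_at=0.01,
                submit_id='blend')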
|
mit
|
JohannesFeldmann/pism
|
examples/ross/plot.py
|
5
|
2762
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from argparse import ArgumentParser
# Import all necessary modules here so that if it fails, it fails early.
try:
import netCDF4 as NC
except:
import netCDF3 as NC
# Set up the option parser
parser = ArgumentParser()
parser.description = "A script to plot results of the ROSS example."
parser.add_argument("FILE", nargs='*')
options = parser.parse_args()
args = options.FILE
if len(args) == 1:
pism_output = args[0]
else:
print("wrong number of arguments, 1 expected, %i given" % int(len(args)))
import sys
exit(1)
try:
nc = NC.Dataset(pism_output, 'r')
except:
print("file %s not found" % pism_output)
import sys
exit(1)
def get(name):
global nc
return np.squeeze(nc.variables[name][:])
def floating(a):
global mask
return np.ma.array(a, mask=mask!=3)
# load data and restrict velocities to floating areas
seconds_per_year = 3.1556926e7
x = get('x')
y = get('y')
csurf = get('csurf')
mask = get('mask')
u = floating(get('u_ssa'))
v = floating(get('v_ssa'))
# B.C.s are observations, so a PISM output file contains everything we need
u_bc = floating(get('u_ssa_bc')) * seconds_per_year # convert to m/year
v_bc = floating(get('v_ssa_bc')) * seconds_per_year
# dark background
plt.figure(1)
plt.subplot(111, axisbg='0.5')
# mark the grounding line
plt.contour(x, y, mask, [2.5], colors="black", linewidths=2)
# plot csurf using log color scale
plt.pcolormesh(x, y, csurf, norm=colors.LogNorm(vmin=1, vmax=1.5e3))
# add a colorbar:
plt.colorbar(extend='both', ticks=[1, 10, 100, 250, 500, 1000], format="%d")
# quiver plot of velocities
s = 10 # stride in grid points
plt.quiver(x[::s], y[::s], u_bc[::s,::s], v_bc[::s,::s], color='white')
plt.quiver(x[::s], y[::s], u[::s,::s], v[::s,::s], color='black')
plt.xticks([])
plt.yticks([])
plt.title(r"Ross ice velocity (m/year); white=observed, black=model")
print "saving figure 'rossquiver.png'"
plt.savefig('rossquiver.png', dpi=300)
# do the scatter plot
magnitude = np.sqrt(np.abs(u[::s,::s])**2 + np.abs(v[::s,::s])**2)
bc_magnitude = np.sqrt(np.abs(u_bc[::s,::s])**2 + np.abs(v_bc[::s,::s])**2)
max_velocity = np.maximum(magnitude.max(), bc_magnitude.max())
plt.figure(2)
plt.hold(True)
plt.scatter(magnitude, bc_magnitude, color='black')
plt.plot([0, max_velocity], [0, max_velocity], color='black', ls='--')
plt.axis(xmin=0, xmax=max_velocity, ymin=0, ymax=max_velocity)
plt.xlabel('modeled speed')
plt.ylabel('observed speed')
plt.title("Observed versus modeled speed (m/year), at points in quiver plot")
print "saving figure 'rossscatter.png'"
plt.savefig('rossscatter.png', dpi=300)
|
gpl-2.0
|
Garrett-R/scikit-learn
|
examples/model_selection/randomized_search.py
|
57
|
3208
|
"""
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The result in parameter settings is quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
iris = load_digits()
X, y = iris.data, iris.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
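# Porting note (editor's addition, not in the original example): in
# scikit-learn >= 0.18 the same comparison imports from sklearn.model_selection,
#     from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
# and results are read from search.cv_results_ instead of grid_scores_.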
|
bsd-3-clause
|
jainaman224/Algo_Ds_Notes
|
Cohen_Sutherland/Cohen_Sutherland.py
|
1
|
3721
|
#!/usr/bin/env python
# Python program to implement Cohen Sutherland algorithm
# for line clipping.
import matplotlib.pyplot as plt
# Defining xmax,ymax and xmin,ymin for rectangle
xmax = 400.0
ymax = 400.0
xmin = 100.0
ymin = 100.0
# 4 bit code according to respective regions
inside = 0 # 0000
top = 8 # 1000
bottom = 4 # 0100
right = 2 # 0010
left = 1 # 0001
# Function to find region code for a point(x,y)
def find_code(x, y):
code = inside
if x < xmin: # to the left of rectangle
code |= left
elif x > xmax: # to the right of rectangle
code |= right
if y < ymin: # below the rectangle
code |= bottom
elif y > ymax: # above the rectangle
code |= top
return code
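# Worked example (illustrative): with the window above (100..400 on both axes),
# find_code(50, 450) combines left (0001) and top (1000) into 1001 = 9, while
# find_code(250, 250) stays 0000 (inside).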
# Implementation of Cohen Sutherland Algorithm
# In order to clip a line from P to Q
def CohenSutherlandClipping(x1, y1, x2, y2):
# find region codes for P, Q
p = find_code(x1, y1) # find code for P
q = find_code(x2, y2) # find code for Q
accept = 0
while True:
if p == 0 and q == 0: # If both points are inside the rectangle
accept = 1
break
elif (p & q) != 0: # If both points are outside rectangle
break
# Some portion of line lies inside the
# rectangle and rest outside the rectangle
else:
# Line Needs clipping
# At least one of the points is outside the rectangle,
# therefore select it,
x = 1.0
y = 1.0
if p != 0:
point_out = p
else:
point_out = q
# Find the intersection point using formulas:
# y = y1 + slope * (x - x1),
# x = x1 + (1 / slope) * (y - y1)
if point_out & top:
# If line crosses ymax
# point is above the clip rectangle
x = x1 + (x2 - x1) * (ymax - y1) / (y2 - y1)
y = ymax
elif point_out & bottom:
# If line crosses ymin
# point is below the clip rectangle
x = x1 + (x2 - x1) * (ymin - y1) / (y2 - y1)
y = ymin
elif point_out & right:
# If line crosses xmax
# point is to the right of the clip rectangle
y = y1 + (y2 - y1) * (xmax - x1) / (x2 - x1)
x = xmax
elif point_out & left:
# If line crosses xmin
# point is to the left of the clip rectangle
y = y1 + (y2 - y1) * (xmin - x1) / (x2 - x1)
x = xmin
# Now intersection point x,y is found so
# We replace point outside clipping rectangle
# by intersection point
if point_out == p:
x1 = x
y1 = y
p = find_code(x1, y1)
else:
x2 = x
y2 = y
q = find_code(x2, y2)
if accept:
print(" The line accepted from %.2f,%.2f to %.2f,%.2f" % (x1, y1, x2, y2))
else:
print("The line is rejected")
x1 = int(input("Enter x1"))
x2 = int(input("Enter x2"))
y1 = int(input("Enter y1"))
y2 = int(input("Enter y2"))
CohenSutherlandClipping(x1, y1, x2, y2)
# Sample Input 1
# CohenSutherlandClipping(120, 350, 60, 450)
# Output 1
# The line accepted from 120.00,350.00 to 100.00,383.33
# line before clipping
# https://drive.google.com/file/d/1xnz25GTDN-I7IjG4ia9Un1DEj_xE6gDU/view?usp=sharing
# line after clipping
# https://drive.google.com/file/d/1hQ0rzD-YAYYXx68x9ElwvOEY5t7QV5zu/view?usp=sharing
# Plot the original (unclipped) input segment for reference
plt.plot([x1, x2], [y1, y2])
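# Optional (illustrative addition): also draw the clipping window for context.
plt.plot([xmin, xmax, xmax, xmin, xmin], [ymin, ymin, ymax, ymax, ymin], 'k--')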
plt.show()
|
gpl-3.0
|
mwcraig/aplpy
|
aplpy/tests/test_pixworld.py
|
4
|
2244
|
import os
import matplotlib
matplotlib.use('Agg')
import numpy as np
from astropy.table import Table
from astropy.tests.helper import pytest
from .helpers import generate_wcs
from .. import wcs_util
HEADER = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data/2d_fits', '1904-66_TAN.hdr')
tab = Table({'RA':[347.,349.], 'DEC':[-68.,-68]})
GOOD_INPUT = [ [1.,2.],
[[1.,2.],[3,4]],
[np.arange(2), np.arange(2)],
[tab['RA'], tab['DEC']]
]
BAD_INPUT = [ [1,['s','w']],
[np.arange(2), np.sum],
[tab['RA'], 'ewr']
]
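# Each entry is an (x, y) pair: GOOD_INPUT values should survive the
# pix2world -> world2pix round trip below, BAD_INPUT values should raise.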
@pytest.mark.parametrize(('inputval'), GOOD_INPUT)
def test_pixworld_input_and_consistency(inputval):
wcs = generate_wcs(HEADER)
ra, dec = wcs_util.pix2world(wcs, inputval[0], inputval[1])
x, y = wcs_util.world2pix(wcs, ra, dec)
# For some inputs (e.g. list) a-b is not defined
# so change them all to np.ndarrays here to make sure
assert np.all(np.abs(np.array(x) - np.array(inputval[0])) < 1e-5)
assert np.all(np.abs(np.array(y) - np.array(inputval[1])) < 1e-5)
def test_returntypes():
wcs = generate_wcs(HEADER)
ra, dec = wcs_util.pix2world(wcs, 1.,2.)
assert np.isscalar(ra) and np.isscalar(dec)
ra, dec = wcs_util.pix2world(wcs, [1.],[2.])
assert (type(ra) == list) and (type(dec) == list)
# Astropy.table.Column objects get downconverted to np.ndarray
ra, dec = wcs_util.pix2world(wcs, np.arange(2), tab['DEC'])
assert isinstance(ra, np.ndarray) and isinstance(dec, np.ndarray)
@pytest.mark.parametrize(('inputval'), BAD_INPUT)
def test_pix2world_fail(inputval):
wcs = generate_wcs(HEADER)
with pytest.raises(Exception) as exc:
wcs_util.pix2world(wcs, inputval[0], inputval[1])
assert exc.value.args[0] == "pix2world should be provided either with two scalars, two lists, or two numpy arrays"
@pytest.mark.parametrize(('inputval'), BAD_INPUT)
def test_world2pix_fail(inputval):
wcs = generate_wcs(HEADER)
with pytest.raises(Exception) as exc:
wcs_util.world2pix(wcs, inputval[0], inputval[1])
assert exc.value.args[0] == "world2pix should be provided either with two scalars, two lists, or two numpy arrays"
|
mit
|
Kortemme-Lab/klab
|
klab/benchmarking/analysis/plot.py
|
1
|
2603
|
import os
import traceback
import matplotlib.pyplot as plt
import numpy
from klab.fs.fsio import write_temp_file
def create_csv(analysis_table):
contents = '\n'.join(['DatasetID,Experimental,Predicted'] + ['%s,%s,%s' % (str(l['DatasetID']), str(l['Experimental']), str(l['Predicted'])) for l in analysis_table])
return write_temp_file('.', contents)
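# Illustrative example (not in the original module): for
# [{'DatasetID': 1, 'Experimental': -1.2, 'Predicted': -0.9}] the temporary CSV
# contains the header line followed by "1,-1.2,-0.9".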
def plot(analysis_table, output_filename, RFunction, title = ''):
filetype = os.path.splitext(output_filename)[1].lower()
if not(filetype == '.png' or filetype == '.pdf' or filetype == '.eps'):
filetype = 'png'
output_filename += '.png'
else:
filetype = filetype[1:]
if len(analysis_table) <= 1:
raise Exception("The analysis table must have at least two points.")
else:
input_filename = create_csv(analysis_table)
try:
R_output = RFunction(input_filename, output_filename, filetype, title = title)
except Exception as e:
print((traceback.format_exc()))
os.remove(input_filename)
raise Exception(e)
os.remove(input_filename)
return output_filename
def plot_pandas(dataframe, x_series, y_series, output_filename, RFunction, title = ''):
new_dataframe = dataframe[[x_series, y_series]]
new_dataframe.columns = ['Experimental', 'Predicted']
new_dataframe = new_dataframe.dropna(subset = ['Experimental', 'Predicted']) # todo: using 'Experimental' and 'Predicted' is hacky - make the inner function more general
csv_filename = os.path.splitext(output_filename)[0] + '.txt'
filetype = os.path.splitext(output_filename)[1].lower()
if not(filetype == '.png' or filetype == '.pdf' or filetype == '.eps'):
filetype = 'png'
output_filename += '.png'
else:
filetype = filetype[1:]
if len(new_dataframe) <= 1:
raise Exception("The analysis table must have at least two points.")
else:
new_dataframe.to_csv(csv_filename, sep = ',', header = True)
try:
R_output = RFunction(csv_filename, output_filename, filetype, title = title)
except Exception as e:
print((traceback.format_exc()))
raise Exception(e)
return output_filename
def histogram(values, out_filepath, num_bins = 50):
hist, bins = numpy.histogram(values, bins=num_bins)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
# draw on a dedicated figure so repeated calls do not pile bars onto the
# implicit current figure
fig, ax = plt.subplots()
ax.bar(center, hist, align='center', width=width)
fig.savefig(out_filepath)
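# Example usage (illustrative): histogram(numpy.random.randn(1000), 'scores_hist.png')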
|
mit
|
alexeyum/scikit-learn
|
examples/cross_decomposition/plot_compare_cross_decomposition.py
|
55
|
4761
|
"""
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent variables:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
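# Columns 1-2 and 3-4 of X (and of Y) share a latent variable, so the printed
# correlation matrices show values near 0.5 in those off-diagonal entries and
# near 0 elsewhere (latent and noise both have unit variance).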
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
|
bsd-3-clause
|
drpngx/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/estimator.py
|
14
|
62939
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import copy
import os
import tempfile
import numpy as np
import six
from google.protobuf import message
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.meta_graph_transform import meta_graph_transform
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary as core_summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existence of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if tensor_util.is_tensor(x) or y is not None and tensor_util.is_tensor(y):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
@deprecated(None, 'Please specify feature columns explicitly.')
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
@deprecated(None, 'Please specify feature columns explicitly.')
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
def _model_fn_args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
ValueError: if partial function has positionally bound arguments
"""
_, fn = tf_decorator.unwrap(fn)
if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'):
# Handle functools.partial and similar objects.
return tuple([
arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):]
if arg not in set(fn.keywords.keys())
])
# Handle function.
return tuple(tf_inspect.getargspec(fn).args)
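# Illustrative example (my_model_fn and my_config are hypothetical): for
#   functools.partial(my_model_fn, config=my_config)
# where my_model_fn(features, labels, mode, config), this returns
# ('features', 'labels', 'mode') -- keyword-bound arguments are dropped.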
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableV2', 'MutableHashTableOfTensors',
'MutableHashTableOfTensorsV2', 'MutableDenseHashTable',
'MutableDenseHashTableV2', 'VarHandleOp'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas,
worker_device=worker_device,
merge_devices=True,
ps_ops=ps_ops,
cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
A dict mapping the friendly names given in `metrics` to the result of calling the
given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
`features`, `labels`, or `predictions` provided. Mostly, a dict is given
but no pred_name specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError('Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics,
predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError('Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics,
labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
results = []
for k, v in sorted(dictionary.items()):
if isinstance(v, float) or isinstance(v, np.float32) or isinstance(
v, int) or isinstance(v, np.int64) or isinstance(v, np.int32):
results.append('%s = %s' % (k, v))
else:
results.append('Type of %s = %s' % (k, type(v)))
return ', '.join(results)
def _write_dict_to_summary(output_dir, dictionary, current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = core_summary.FileWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
if key == 'global_step':
continue
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
summary_proto.value.add(tag=key, simple_value=float(dictionary[key]))
elif (isinstance(dictionary[key], np.int64) or
isinstance(dictionary[key], np.int32) or
isinstance(dictionary[key], int)):
summary_proto.value.add(tag=key, simple_value=int(dictionary[key]))
elif isinstance(dictionary[key], six.string_types):
try:
summ = summary_pb2.Summary.FromString(dictionary[key])
for i, _ in enumerate(summ.value):
summ.value[i].tag = key
summary_proto.value.extend(summ.value)
except message.DecodeError:
logging.warn('Skipping summary for %s, cannot parse string to Summary.',
key)
continue
elif isinstance(dictionary[key], np.ndarray):
value = summary_proto.value.add()
value.tag = key
value.node_name = key
tensor_proto = tensor_util.make_tensor_proto(dictionary[key])
value.tensor.CopyFrom(tensor_proto)
logging.info(
'Summary for np.ndarray is not visible in Tensorboard by default. '
'Consider using a Tensorboard plugin for visualization (see '
'https://github.com/tensorflow/tensorboard-plugin-example/blob/master/README.md'
' for more information).')
else:
logging.warn(
'Skipping summary for %s, must be a float, np.float32, np.int64, '
'np.int32 or int or np.ndarray or a serialized string of Summary.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
GraphRewriteSpec = collections.namedtuple('GraphRewriteSpec',
['tags', 'transforms'])
class BaseEstimator(sklearn.BaseEstimator, evaluable.Evaluable,
trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
Users should not instantiate or subclass this class. Instead, use an
`Estimator`.
"""
__metaclass__ = abc.ABCMeta
# Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
@deprecated(None, 'Please replace uses of any Estimator from tf.contrib.learn'
' with an Estimator from tf.estimator.*')
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be same.
config: A RunConfig instance.
"""
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(b/9965722): remove this suppression after it is no longer
# necessary.
# pylint: disable=g-doc-exception
raise ValueError(
'model_dir are set both in constructor and RunConfig, but with '
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
# pylint: enable=g-doc-exception
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@property
def model_fn(self):
"""Returns the model_fn which is bound to self.params.
Returns:
The model_fn with the following signature:
`def model_fn(features, labels, mode, metrics)`
"""
def public_model_fn(features, labels, mode, config):
return self._call_model_fn(features, labels, mode, config=config)
return public_model_fn
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
('x', None), ('y', None), ('batch_size', None))
def fit(self,
x=None,
y=None,
input_fn=None,
steps=None,
batch_size=None,
monitors=None,
max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
logging.info('Skipping training since max_steps has already been reached.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
('x', None), ('y', None), ('batch_size', None))
def partial_fit(self,
x=None,
y=None,
input_fn=None,
steps=1,
batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This either can
implement iterative training or out-of-core/online training.
This is especially useful when the whole dataset is too big to
fit in memory at the same time, or when the model is taking a long time
to converge and you want to split training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(
x=x,
y=y,
input_fn=input_fn,
steps=steps,
batch_size=batch_size,
monitors=monitors)
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
('x', None), ('y', None), ('batch_size', None))
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
Or if `metrics` is not `None` or `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics, name)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
('x', None), ('batch_size', None), ('as_iterable', True))
def predict(self,
x=None,
input_fn=None,
batch_size=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
iterate_batches: If True, yield the whole batch at once instead of
decomposing the batch into individual samples. Only relevant when
as_iterable is True.
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable,
iterate_batches=iterate_batches)
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def export(
self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None,
checkpoint_path=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
key into the features dict returned by `input_fn` that corresponds to a
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
checkpoint_path: the checkpoint path of the model to be exported. If it is
`None` (which is default), will use the latest checkpoint in
export_dir.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep,
checkpoint_path=checkpoint_path)
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.', str(labels),
str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError(
"Couldn't find trained model at %s." % self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval'
if not name else 'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = training_util.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_results = self._get_eval_ops(features, labels, metrics)
eval_dict = model_fn_results.eval_metric_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
# We need to copy the hook array as we modify it, thus [:].
hooks = hooks[:] if hooks else []
if feed_fn:
hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
if steps == 0:
logging.warning('evaluation steps are 0. If `input_fn` does not raise '
'`OutOfRangeError`, the evaluation will never stop. '
'Use steps=None if intended.')
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=model_fn_results.scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=self._session_config)
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError(
"Couldn't find trained model at %s." % self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
training_util.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=infer_ops.scaffold,
config=self._session_config))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions)
if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = training_util.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
training_util._get_or_create_global_step_read() # pylint: disable=protected-access
model_fn_ops = self._get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend(hooks)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
keep_checkpoint_every_n_hours=(
self._config.keep_checkpoint_every_n_hours),
defer_build=True,
save_relative_paths=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
])
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
return loss
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
* `mode`: Optional. Specifies if this is training, evaluation or
prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
is passed to Estimator in `params` parameter. This allows configuring
Estimators from hyperparameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
* `model_dir`: Optional directory where model parameters, graph etc
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
Supports next three signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _model_fn_args(model_fn)
if params is not None and 'params' not in model_fn_args:
raise ValueError('Estimator\'s model_fn (%s) does not have a params '
'argument, but params (%s) were passed to the '
'Estimator\'s constructor.' % (model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
def _call_model_fn(self, features, labels, mode, metrics=None, config=None):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
metrics: Dict of metrics.
config: RunConfig.
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _model_fn_args(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
if config:
kwargs['config'] = config
else:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
model_fn_ops = model_fn_results
else:
# Here model_fn_results should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
model_fn_ops = model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(
_make_metrics_ops(metrics, features, labels,
model_fn_ops.predictions))
return model_fn_ops
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(features, labels,
model_fn_lib.ModeKeys.EVAL, metrics)
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
def export_savedmodel(self,
export_dir_base,
serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None,
graph_rewrite_specs=(GraphRewriteSpec(
(tag_constants.SERVING,), ()),),
strip_default_attrs=False):
# pylint: disable=line-too-long
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
serving_input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified. Not needed for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
graph_rewrite_specs: an iterable of `GraphRewriteSpec`. Each element will
produce a separate MetaGraphDef within the exported SavedModel, tagged
and rewritten as specified. Defaults to a single entry using the
default serving tag ("serve") and no rewriting.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued
Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
Returns:
The string path to the exported directory.
Raises:
ValueError: if an unrecognized export_type is requested.
"""
# pylint: enable=line-too-long
if serving_input_fn is None:
raise ValueError('serving_input_fn must be defined.')
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError(
"Couldn't find trained model at %s." % self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
# We'll write the SavedModel to a temporary directory and then atomically
# rename it at the end. This helps to avoid corrupt / incomplete outputs,
# which could otherwise occur if the job is preempted or otherwise fails
# in the middle of SavedModel creation.
temp_export_dir = saved_model_export_utils.get_temp_export_dir(export_dir)
builder = saved_model_builder.SavedModelBuilder(temp_export_dir)
# Build the base graph
with ops.Graph().as_default() as g:
training_util.create_global_step(g)
# Call the serving_input_fn and collect the input alternatives.
input_ops = serving_input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# TODO(b/34388557) This is a stopgap, pending recording model provenance.
# Record which features are expected at serving time. It is assumed that
# these are the features that were used in training.
for feature_key in input_ops.features.keys():
ops.add_to_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key)
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
init_op = control_flow_ops.group(variables.local_variables_initializer(),
resources.initialize_resources(
resources.shared_resources()),
lookup_ops.tables_initializer())
# Build the SignatureDefs from all pairs of input and output alternatives
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
# Export the first MetaGraphDef with variables, assets etc.
with tf_session.Session('') as session:
# pylint: disable=protected-access
saveables = variables._all_saveable_objects()
# pylint: enable=protected-access
if (model_fn_ops.scaffold is not None and
model_fn_ops.scaffold.saver is not None):
saver_for_restore = model_fn_ops.scaffold.saver
elif saveables:
saver_for_restore = saver.Saver(saveables, sharded=True)
saver_for_restore.restore(session, checkpoint_path)
# Perform the export
if not graph_rewrite_specs or graph_rewrite_specs[0].transforms:
raise ValueError('The first element of graph_rewrite_specs '
'must specify no transforms.')
untransformed_tags = graph_rewrite_specs[0].tags
# TODO(soergel): switch to main_op or otherwise update when dust settles
builder.add_meta_graph_and_variables(
session,
untransformed_tags,
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op,
strip_default_attrs=strip_default_attrs)
# pylint: disable=protected-access
base_meta_graph_def = builder._saved_model.meta_graphs[0]
# pylint: enable=protected-access
if graph_rewrite_specs[1:]:
# Prepare the input_names and output_names needed for the
# meta_graph_transform call below.
input_names = [
tensor.name
for input_dict in input_alternatives.values()
for tensor in input_dict.values()
]
output_names = [
tensor.name
for output_alternative in output_alternatives.values()
for tensor in output_alternative[1].values()
]
# Write the additional MetaGraphDefs
for graph_rewrite_spec in graph_rewrite_specs[1:]:
# TODO(soergel) consider moving most of this to saved_model.builder_impl
# as e.g. builder.add_rewritten_meta_graph(rewritten_graph_def, tags)
transformed_meta_graph_def = meta_graph_transform.meta_graph_transform(
base_meta_graph_def, input_names, output_names,
graph_rewrite_spec.transforms, graph_rewrite_spec.tags)
# pylint: disable=protected-access
meta_graph_def = builder._saved_model.meta_graphs.add()
# pylint: enable=protected-access
meta_graph_def.CopyFrom(transformed_meta_graph_def)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(
compat.as_bytes(temp_export_dir), compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(
compat.as_bytes(assets_extra_path), compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
builder.save(as_text)
gfile.Rename(temp_export_dir, export_dir)
return export_dir
# For the duration of the deprecation period, allow direct x, y access from Estimator.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
@deprecated(None, 'Please switch to the Estimator interface.')
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(
x,
y,
input_fn=None,
feed_fn=None,
batch_size=batch_size,
shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(
input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None, name=None):
input_fn, feed_fn = _get_input_fn(
x,
y,
input_fn=None,
feed_fn=None,
batch_size=batch_size,
shuffle=False,
epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x,
None,
input_fn=None,
feed_fn=None,
batch_size=batch_size,
shuffle=False,
epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate([output[key] for output in results], axis=0)
for key in results[0]
}
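# Hedged usage sketch (added for illustration; not part of the original
# module). It shows the sklearn-style flow that SKCompat above enables:
# numpy arrays in, streaming eval metrics and predictions out.
# `build_model_fn` is an assumed placeholder for any callable with the
# documented (features, labels, mode) -> ModelFnOps signature; nothing by
# that name exists in this file.
def _skcompat_usage_sketch(build_model_fn, x_train, y_train, x_test, y_test):
  """Illustrative only: fit, score and predict through the SKCompat wrapper."""
  est = SKCompat(Estimator(model_fn=build_model_fn))
  est.fit(x_train, y_train, batch_size=128, steps=100)
  eval_results = est.score(x_test, y_test, batch_size=128)
  predictions = est.predict(x_test, batch_size=128)
  return eval_results, predictions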
|
apache-2.0
|
Weihonghao/ECM
|
Vpy34/lib/python3.5/site-packages/pandas/io/stata.py
|
6
|
83955
|
"""
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by Joe Presbrey as part of PyDTA.
It has been extended and improved by Skipper Seabold from the Statsmodels
project who also developed the StataWriter and was finally added to pandas in
a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
http://www.statsmodels.org/devel/
"""
import numpy as np
import sys
import struct
from dateutil.relativedelta import relativedelta
from pandas.core.dtypes.common import (
is_categorical_dtype, is_datetime64_dtype,
_ensure_object)
from pandas.core.base import StringMixin
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.series import Series
import datetime
from pandas import compat, to_timedelta, to_datetime, isnull, DatetimeIndex
from pandas.compat import lrange, lmap, lzip, text_type, string_types, range, \
zip, BytesIO
from pandas.util._decorators import Appender
import pandas as pd
from pandas.io.common import get_filepath_or_buffer, BaseIterator
from pandas._libs.lib import max_len_string_array, infer_dtype
from pandas._libs.tslib import NaT, Timestamp
VALID_ENCODINGS = ('ascii', 'us-ascii', 'latin-1', 'latin_1', 'iso-8859-1',
'iso8859-1', '8859', 'cp819', 'latin', 'latin1', 'L1')
_version_error = ("Version of given Stata file is not 104, 105, 108, "
"111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), "
"115 (Stata 12), 117 (Stata 13), or 118 (Stata 14)")
_statafile_processing_params1 = """\
convert_dates : boolean, defaults to True
Convert date variables to DataFrame time values
convert_categoricals : boolean, defaults to True
Read value labels and convert columns to Categorical/Factor variables"""
_encoding_params = """\
encoding : string, None or encoding
Encoding used to parse the files. None defaults to latin-1."""
_statafile_processing_params2 = """\
index : identifier of index column
identifier of column that should be used as index of the DataFrame
convert_missing : boolean, defaults to False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nans.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : boolean, defaults to True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64)
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns
order_categoricals : boolean, defaults to True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines"""
_iterator_params = """\
iterator : boolean, default False
Return StataReader object"""
_read_stata_doc = """Read Stata file into DataFrame
Parameters
----------
filepath_or_buffer : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
%s
Returns
-------
DataFrame or StataReader
Examples
--------
Read a Stata dta file:
>>> df = pandas.read_stata('filename.dta')
Read a Stata dta file in 10,000 line chunks:
>>> itr = pandas.read_stata('filename.dta', chunksize=10000)
>>> for chunk in itr:
...     do_something(chunk)
""" % (_statafile_processing_params1, _encoding_params,
_statafile_processing_params2, _chunksize_params,
_iterator_params)
_data_method_doc = """Reads observations from Stata file, converting them into a dataframe
This is a legacy method. Use `read` in new code.
Parameters
----------
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_read_method_doc = """\
Reads observations from Stata file, converting them into a dataframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_stata_reader_doc = """\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
""" % (_statafile_processing_params1, _statafile_processing_params2,
_encoding_params, _chunksize_params)
@Appender(_read_stata_doc)
def read_stata(filepath_or_buffer, convert_dates=True,
convert_categoricals=True, encoding=None, index=None,
convert_missing=False, preserve_dtypes=True, columns=None,
order_categoricals=True, chunksize=None, iterator=False):
reader = StataReader(filepath_or_buffer,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
index=index, convert_missing=convert_missing,
preserve_dtypes=preserve_dtypes,
columns=columns,
order_categoricals=order_categoricals,
chunksize=chunksize, encoding=encoding)
if iterator or chunksize:
data = reader
else:
try:
data = reader.read()
finally:
reader.close()
return data
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
def _stata_elapsed_date_to_datetime_vec(dates, fmt):
"""
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
        The format to convert to. Can be tc, td, tw, tm, tq, th, ty
    Returns
-------
converted : Series
The converted dates
Examples
--------
>>> import pandas as pd
>>> dates = pd.Series([52])
>>> _stata_elapsed_date_to_datetime_vec(dates , "%tw")
0 1961-01-01
dtype: datetime64[ns]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
half-years since 1960h1 yearly
date - ty
years since 0000
If you don't have pandas with datetime support, then you can't do
milliseconds accurately.
"""
MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
def convert_year_month_safe(year, month):
"""
Convert year and month to datetimes, using pandas vectorized versions
        when the date range falls within the range supported by pandas.
        Otherwise it falls back to a slower but more robust method using
        datetime.
"""
if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
return to_datetime(100 * year + month, format='%Y%m')
else:
index = getattr(year, 'index', None)
return Series(
[datetime.datetime(y, m, 1) for y, m in zip(year, month)],
index=index)
def convert_year_days_safe(year, days):
"""
Converts year (e.g. 1999) and days since the start of the year to a
datetime or datetime64 Series
"""
if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
return (to_datetime(year, format='%Y') +
to_timedelta(days, unit='d'))
else:
index = getattr(year, 'index', None)
value = [datetime.datetime(y, 1, 1) + relativedelta(days=int(d))
for y, d in zip(year, days)]
return Series(value, index=index)
def convert_delta_safe(base, deltas, unit):
"""
Convert base dates and deltas to datetimes, using pandas vectorized
versions if the deltas satisfy restrictions required to be expressed
as dates in pandas.
"""
index = getattr(deltas, 'index', None)
if unit == 'd':
if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:
values = [base + relativedelta(days=int(d)) for d in deltas]
return Series(values, index=index)
elif unit == 'ms':
if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:
values = [base + relativedelta(microseconds=(int(d) * 1000))
for d in deltas]
return Series(values, index=index)
else:
raise ValueError('format not understood')
base = to_datetime(base)
deltas = to_timedelta(deltas, unit=unit)
return base + deltas
# TODO: If/when pandas supports more than datetime64[ns], this should be
# improved to use correct range, e.g. datetime[Y] for yearly
bad_locs = np.isnan(dates)
has_bad_values = False
if bad_locs.any():
has_bad_values = True
data_col = Series(dates)
data_col[bad_locs] = 1.0 # Replace with NaT
dates = dates.astype(np.int64)
if fmt in ["%tc", "tc"]: # Delta ms relative to base
base = stata_epoch
ms = dates
conv_dates = convert_delta_safe(base, ms, 'ms')
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Encountered %tC format. Leaving in Stata Internal Format.")
conv_dates = Series(dates, dtype=np.object)
if has_bad_values:
conv_dates[bad_locs] = pd.NaT
return conv_dates
elif fmt in ["%td", "td", "%d", "d"]: # Delta days relative to base
base = stata_epoch
days = dates
conv_dates = convert_delta_safe(base, days, 'd')
elif fmt in ["%tw", "tw"]: # does not count leap days - 7 days is a week
year = stata_epoch.year + dates // 52
days = (dates % 52) * 7
conv_dates = convert_year_days_safe(year, days)
elif fmt in ["%tm", "tm"]: # Delta months relative to base
year = stata_epoch.year + dates // 12
month = (dates % 12) + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%tq", "tq"]: # Delta quarters relative to base
year = stata_epoch.year + dates // 4
month = (dates % 4) * 3 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%th", "th"]: # Delta half-years relative to base
year = stata_epoch.year + dates // 2
month = (dates % 2) * 6 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%ty", "ty"]: # Years -- not delta
year = dates
month = np.ones_like(dates)
conv_dates = convert_year_month_safe(year, month)
else:
raise ValueError("Date fmt %s not understood" % fmt)
if has_bad_values: # Restore NaT for bad values
conv_dates[bad_locs] = NaT
return conv_dates
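# Hedged illustration (added; not part of the original file). It exercises the
# converter above on monthly (%tm) values, which count months elapsed since the
# 1960m1 epoch: 0 -> 1960-01-01, 1 -> 1960-02-01, 601 -> 2010-02-01. The sample
# values are assumptions chosen only for this demonstration.
def _example_sif_to_datetime():
    months = Series([0, 1, 601])
    return _stata_elapsed_date_to_datetime_vec(months, "%tm")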
def _datetime_to_stata_elapsed_vec(dates, fmt):
"""
Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
Series or array containing datetime.datetime or datetime64[ns] to
convert to the Stata Internal Format given by fmt
fmt : str
        The format to convert to. Can be tc, td, tw, tm, tq, th, ty
"""
index = dates.index
NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
US_PER_DAY = NS_PER_DAY / 1000
def parse_dates_safe(dates, delta=False, year=False, days=False):
d = {}
if is_datetime64_dtype(dates.values):
if delta:
delta = dates - stata_epoch
d['delta'] = delta.values.astype(
np.int64) // 1000 # microseconds
if days or year:
dates = DatetimeIndex(dates)
d['year'], d['month'] = dates.year, dates.month
if days:
days = (dates.astype(np.int64) -
to_datetime(d['year'], format='%Y').astype(np.int64))
d['days'] = days // NS_PER_DAY
elif infer_dtype(dates) == 'datetime':
if delta:
delta = dates.values - stata_epoch
f = lambda x: \
US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
v = np.vectorize(f)
d['delta'] = v(delta)
if year:
year_month = dates.apply(lambda x: 100 * x.year + x.month)
d['year'] = year_month.values // 100
d['month'] = (year_month.values - d['year'] * 100)
if days:
f = lambda x: (x - datetime.datetime(x.year, 1, 1)).days
v = np.vectorize(f)
d['days'] = v(dates)
else:
raise ValueError('Columns containing dates must contain either '
'datetime64, datetime.datetime or null values.')
return DataFrame(d, index=index)
bad_loc = isnull(dates)
index = dates.index
if bad_loc.any():
dates = Series(dates)
if is_datetime64_dtype(dates):
dates[bad_loc] = to_datetime(stata_epoch)
else:
dates[bad_loc] = stata_epoch
if fmt in ["%tc", "tc"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta / 1000
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Stata Internal Format tC not supported.")
conv_dates = dates
elif fmt in ["%td", "td"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta // US_PER_DAY
elif fmt in ["%tw", "tw"]:
d = parse_dates_safe(dates, year=True, days=True)
conv_dates = (52 * (d.year - stata_epoch.year) + d.days // 7)
elif fmt in ["%tm", "tm"]:
d = parse_dates_safe(dates, year=True)
conv_dates = (12 * (d.year - stata_epoch.year) + d.month - 1)
elif fmt in ["%tq", "tq"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
elif fmt in ["%th", "th"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 2 * (d.year - stata_epoch.year) + \
(d.month > 6).astype(np.int)
elif fmt in ["%ty", "ty"]:
d = parse_dates_safe(dates, year=True)
conv_dates = d.year
else:
raise ValueError("Format %s is not a known Stata date format" % fmt)
conv_dates = Series(conv_dates, dtype=np.float64)
missing_value = struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0]
conv_dates[bad_loc] = missing_value
return Series(conv_dates, index=index)
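# Hedged illustration (added; not part of the original file). Round-tripping
# whole-day dates through %td (days since 01jan1960) with the two converters
# defined above should be lossless. The sample dates are assumptions used only
# for this demonstration.
def _example_td_round_trip():
    dates = to_datetime(Series(['1960-01-01', '2017-06-15']))
    elapsed = _datetime_to_stata_elapsed_vec(dates, "%td")
    return _stata_elapsed_date_to_datetime_vec(elapsed, "%td")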
excessive_string_length_error = """
Fixed width strings in Stata .dta files are limited to 244 (or fewer)
characters. Column '%s' does not satisfy this restriction.
"""
class PossiblePrecisionLoss(Warning):
pass
precision_loss_doc = """
Column converted from %s to %s, and some data are outside of the lossless
conversion range. This may result in a loss of precision in the saved data.
"""
class ValueLabelTypeMismatch(Warning):
pass
value_label_mismatch_doc = """
Stata value labels (pandas categories) must be strings. Column {0} contains
non-string labels which will be converted to strings. Please check that the
Stata data file created has not lost information due to duplicate labels.
"""
class InvalidColumnName(Warning):
pass
invalid_name_doc = """
Not all pandas column names were valid Stata variable names.
The following replacements have been made:
{0}
If this is not what you expect, please make sure you have Stata-compliant
column names in your DataFrame (strings only, max 32 characters, only
alphanumerics and underscores, no Stata reserved words)
"""
def _cast_to_stata_types(data):
"""Checks the dtypes of the columns of a pandas DataFrame for
compatibility with the data types and ranges supported by Stata, and
converts if necessary.
Parameters
----------
data : DataFrame
The DataFrame to check and convert
Notes
-----
Numeric columns in Stata must be one of int8, int16, int32, float32 or
float64, with some additional value restrictions. int8 and int16 columns
are checked for violations of the value restrictions and upcast if needed.
int64 data is not usable in Stata, and so it is downcast to int32 whenever
    the values are in the int32 range, and cast to float64 when larger than
    this range. If the int64 values are outside of the range of those
    perfectly representable as float64 values, a warning is raised.
    bool columns are cast to int8. uint columns are converted to int of the
    same size if there is no loss in precision, otherwise they are upcast to
    a larger type. uint64 is currently not supported since it is converted to
    object in a DataFrame.
"""
ws = ''
# original, if small, if large
conversion_data = ((np.bool, np.int8, np.int8),
(np.uint8, np.int8, np.int16),
(np.uint16, np.int16, np.int32),
(np.uint32, np.int32, np.int64))
float32_max = struct.unpack('<f', b'\xff\xff\xff\x7e')[0]
float64_max = struct.unpack('<d', b'\xff\xff\xff\xff\xff\xff\xdf\x7f')[0]
for col in data:
dtype = data[col].dtype
# Cast from unsupported types to supported types
for c_data in conversion_data:
if dtype == c_data[0]:
if data[col].max() <= np.iinfo(c_data[1]).max:
dtype = c_data[1]
else:
dtype = c_data[2]
if c_data[2] == np.float64: # Warn if necessary
if data[col].max() >= 2 ** 53:
ws = precision_loss_doc % ('uint64', 'float64')
data[col] = data[col].astype(dtype)
# Check values and upcast if necessary
if dtype == np.int8:
if data[col].max() > 100 or data[col].min() < -127:
data[col] = data[col].astype(np.int16)
elif dtype == np.int16:
if data[col].max() > 32740 or data[col].min() < -32767:
data[col] = data[col].astype(np.int32)
elif dtype == np.int64:
if (data[col].max() <= 2147483620 and
data[col].min() >= -2147483647):
data[col] = data[col].astype(np.int32)
else:
data[col] = data[col].astype(np.float64)
if data[col].max() >= 2 ** 53 or data[col].min() <= -2 ** 53:
ws = precision_loss_doc % ('int64', 'float64')
elif dtype in (np.float32, np.float64):
value = data[col].max()
if np.isinf(value):
msg = 'Column {0} has a maximum value of infinity which is ' \
'outside the range supported by Stata.'
raise ValueError(msg.format(col))
if dtype == np.float32 and value > float32_max:
data[col] = data[col].astype(np.float64)
elif dtype == np.float64:
if value > float64_max:
msg = 'Column {0} has a maximum value ({1}) outside the ' \
'range supported by Stata ({1})'
raise ValueError(msg.format(col, value, float64_max))
if ws:
import warnings
warnings.warn(ws, PossiblePrecisionLoss)
return data
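# Hedged illustration (added; not part of the original file). A uint8 column is
# widened only as far as needed (here to int16, because 200 does not fit in
# int8), while an int64 column whose values fit the int32 range is downcast.
# The toy frame below is an assumption for this sketch.
def _example_cast_to_stata_types():
    df = DataFrame({'small': np.array([0, 1, 200], dtype=np.uint8),
                    'wide': np.array([1, 2, 3], dtype=np.int64)})
    return _cast_to_stata_types(df)   # 'small' becomes int16, 'wide' int32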
class StataValueLabel(object):
"""
Parse a categorical column and prepare formatted output
Parameters
-----------
    catarray : Categorical Series
        Categorical column to encode; the Series name is used as the Stata
        value label name
Methods
-------
generate_value_label
"""
def __init__(self, catarray):
self.labname = catarray.name
categories = catarray.cat.categories
self.value_labels = list(zip(np.arange(len(categories)), categories))
self.value_labels.sort(key=lambda x: x[0])
self.text_len = np.int32(0)
self.off = []
self.val = []
self.txt = []
self.n = 0
# Compute lengths and setup lists of offsets and labels
for vl in self.value_labels:
category = vl[1]
if not isinstance(category, string_types):
category = str(category)
import warnings
warnings.warn(value_label_mismatch_doc.format(catarray.name),
ValueLabelTypeMismatch)
self.off.append(self.text_len)
self.text_len += len(category) + 1 # +1 for the padding
self.val.append(vl[0])
self.txt.append(category)
self.n += 1
if self.text_len > 32000:
raise ValueError('Stata value labels for a single variable must '
'have a combined length less than 32,000 '
'characters.')
# Ensure int32
self.off = np.array(self.off, dtype=np.int32)
self.val = np.array(self.val, dtype=np.int32)
# Total length
self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len
def _encode(self, s):
"""
        Python 3 compatibility shim
"""
if compat.PY3:
return s.encode(self._encoding)
else:
return s
def generate_value_label(self, byteorder, encoding):
"""
Parameters
----------
byteorder : str
Byte order of the output
encoding : str
File encoding
Returns
-------
value_label : bytes
Bytes containing the formatted value label
"""
self._encoding = encoding
bio = BytesIO()
null_string = '\x00'
null_byte = b'\x00'
# len
bio.write(struct.pack(byteorder + 'i', self.len))
# labname
labname = self._encode(_pad_bytes(self.labname[:32], 33))
bio.write(labname)
# padding - 3 bytes
for i in range(3):
bio.write(struct.pack('c', null_byte))
# value_label_table
# n - int32
bio.write(struct.pack(byteorder + 'i', self.n))
# textlen - int32
bio.write(struct.pack(byteorder + 'i', self.text_len))
# off - int32 array (n elements)
for offset in self.off:
bio.write(struct.pack(byteorder + 'i', offset))
# val - int32 array (n elements)
for value in self.val:
bio.write(struct.pack(byteorder + 'i', value))
# txt - Text labels, null terminated
for text in self.txt:
bio.write(self._encode(text + null_string))
bio.seek(0)
return bio.read()
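# Hedged illustration (added; not part of the original file). Building the
# binary value-label table for a small categorical column via the class above.
# The column name 'grade' and its labels are assumptions for this sketch.
def _example_value_label_bytes():
    cat = Series(['low', 'high', 'low'], name='grade').astype('category')
    return StataValueLabel(cat).generate_value_label('<', 'latin-1')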
class StataMissingValue(StringMixin):
"""
An observation's missing value.
Parameters
-----------
value : int8, int16, int32, float32 or float64
The Stata missing value code
Attributes
----------
string : string
String representation of the Stata missing value
value : int8, int16, int32, float32 or float64
The original encoded missing value
Notes
-----
More information: <http://www.stata.com/help.cgi?missing>
    Integer missing values map the code '.', '.a', ..., '.z' to the ranges
101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...
2147483647 (for int32). Missing values for floating point data types are
more complex but the pattern is simple to discern from the following table.
np.float32 missing values (float in Stata)
0000007f .
0008007f .a
0010007f .b
...
00c0007f .x
00c8007f .y
00d0007f .z
np.float64 missing values (double in Stata)
000000000000e07f .
000000000001e07f .a
000000000002e07f .b
...
000000000018e07f .x
000000000019e07f .y
00000000001ae07f .z
"""
# Construct a dictionary of missing values
MISSING_VALUES = {}
bases = (101, 32741, 2147483621)
for b in bases:
# Conversion to long to avoid hash issues on 32 bit platforms #8968
MISSING_VALUES[compat.long(b)] = '.'
for i in range(1, 27):
MISSING_VALUES[compat.long(i + b)] = '.' + chr(96 + i)
float32_base = b'\x00\x00\x00\x7f'
increment = struct.unpack('<i', b'\x00\x08\x00\x00')[0]
for i in range(27):
value = struct.unpack('<f', float32_base)[0]
MISSING_VALUES[value] = '.'
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack('<i', struct.pack('<f', value))[
0] + increment
float32_base = struct.pack('<i', int_value)
float64_base = b'\x00\x00\x00\x00\x00\x00\xe0\x7f'
increment = struct.unpack('q', b'\x00\x00\x00\x00\x00\x01\x00\x00')[0]
for i in range(27):
value = struct.unpack('<d', float64_base)[0]
MISSING_VALUES[value] = '.'
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack('q', struct.pack('<d', value))[0] + increment
float64_base = struct.pack('q', int_value)
BASE_MISSING_VALUES = {'int8': 101,
'int16': 32741,
'int32': 2147483621,
'float32': struct.unpack('<f', float32_base)[0],
'float64': struct.unpack('<d', float64_base)[0]}
def __init__(self, value):
self._value = value
# Conversion to long to avoid hash issues on 32 bit platforms #8968
value = compat.long(value) if value < 2147483648 else float(value)
self._str = self.MISSING_VALUES[value]
string = property(lambda self: self._str,
doc="The Stata representation of the missing value: "
"'.', '.a'..'.z'")
value = property(lambda self: self._value,
doc='The binary representation of the missing value.')
def __unicode__(self):
return self.string
def __repr__(self):
# not perfect :-/
return "%s(%s)" % (self.__class__, self)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.string == other.string and self.value == other.value)
@classmethod
def get_base_missing_value(cls, dtype):
if dtype == np.int8:
value = cls.BASE_MISSING_VALUES['int8']
elif dtype == np.int16:
value = cls.BASE_MISSING_VALUES['int16']
elif dtype == np.int32:
value = cls.BASE_MISSING_VALUES['int32']
elif dtype == np.float32:
value = cls.BASE_MISSING_VALUES['float32']
elif dtype == np.float64:
value = cls.BASE_MISSING_VALUES['float64']
else:
raise ValueError('Unsupported dtype')
return value
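# Hedged illustration (added; not part of the original file). The generic
# system-missing code for int8 data is 101 and maps to '.', with '.a' ... '.z'
# following immediately after, as documented in the class above.
def _example_missing_values():
    base = StataMissingValue.get_base_missing_value(np.int8)          # 101
    return StataMissingValue(base).string, StataMissingValue(base + 1).string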
class StataParser(object):
_default_encoding = 'latin-1'
def __init__(self, encoding):
if encoding is not None:
if encoding not in VALID_ENCODINGS:
raise ValueError('Unknown encoding. Only latin-1 and ascii '
'supported.')
self._encoding = encoding
# type code.
# --------------------
# str1 1 = 0x01
# str2 2 = 0x02
# ...
# str244 244 = 0xf4
# byte 251 = 0xfb (sic)
# int 252 = 0xfc
# long 253 = 0xfd
# float 254 = 0xfe
# double 255 = 0xff
# --------------------
# NOTE: the byte type seems to be reserved for categorical variables
# with a label, but the underlying variable is -127 to 100
# we're going to drop the label and cast to int
self.DTYPE_MAP = \
dict(
lzip(range(1, 245), ['a' + str(i) for i in range(1, 245)]) +
[
(251, np.int8),
(252, np.int16),
(253, np.int32),
(254, np.float32),
(255, np.float64)
]
)
self.DTYPE_MAP_XML = \
dict(
[
(32768, np.uint8), # Keys to GSO
(65526, np.float64),
(65527, np.float32),
(65528, np.int32),
(65529, np.int16),
(65530, np.int8)
]
)
self.TYPE_MAP = lrange(251) + list('bhlfd')
self.TYPE_MAP_XML = \
dict(
[
# Not really a Q, unclear how to handle byteswap
(32768, 'Q'),
(65526, 'd'),
(65527, 'f'),
(65528, 'l'),
(65529, 'h'),
(65530, 'b')
]
)
# NOTE: technically, some of these are wrong. there are more numbers
# that can be represented. it's the 27 ABOVE and BELOW the max listed
# numeric data type in [U] 12.2.2 of the 11.2 manual
float32_min = b'\xff\xff\xff\xfe'
float32_max = b'\xff\xff\xff\x7e'
float64_min = b'\xff\xff\xff\xff\xff\xff\xef\xff'
float64_max = b'\xff\xff\xff\xff\xff\xff\xdf\x7f'
self.VALID_RANGE = {
'b': (-127, 100),
'h': (-32767, 32740),
'l': (-2147483647, 2147483620),
'f': (np.float32(struct.unpack('<f', float32_min)[0]),
np.float32(struct.unpack('<f', float32_max)[0])),
'd': (np.float64(struct.unpack('<d', float64_min)[0]),
np.float64(struct.unpack('<d', float64_max)[0]))
}
self.OLD_TYPE_MAPPING = {
98: 251, # byte
105: 252, # int
108: 253, # long
102: 254 # float
# don't know old code for double
}
# These missing values are the generic '.' in Stata, and are used
# to replace nans
self.MISSING_VALUES = {
'b': 101,
'h': 32741,
'l': 2147483621,
'f': np.float32(struct.unpack('<f', b'\x00\x00\x00\x7f')[0]),
'd': np.float64(
struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
}
self.NUMPY_TYPE_MAP = {
'b': 'i1',
'h': 'i2',
'l': 'i4',
'f': 'f4',
'd': 'f8',
'Q': 'u8'
}
# Reserved words cannot be used as variable names
self.RESERVED_WORDS = ('aggregate', 'array', 'boolean', 'break',
'byte', 'case', 'catch', 'class', 'colvector',
'complex', 'const', 'continue', 'default',
'delegate', 'delete', 'do', 'double', 'else',
'eltypedef', 'end', 'enum', 'explicit',
'export', 'external', 'float', 'for', 'friend',
'function', 'global', 'goto', 'if', 'inline',
'int', 'local', 'long', 'NULL', 'pragma',
'protected', 'quad', 'rowvector', 'short',
'typedef', 'typename', 'virtual')
class StataReader(StataParser, BaseIterator):
__doc__ = _stata_reader_doc
def __init__(self, path_or_buf, convert_dates=True,
convert_categoricals=True, index=None,
convert_missing=False, preserve_dtypes=True,
columns=None, order_categoricals=True,
encoding='latin-1', chunksize=None):
super(StataReader, self).__init__(encoding)
self.col_sizes = ()
# Arguments to the reader (can be temporarily overridden in
# calls to read).
self._convert_dates = convert_dates
self._convert_categoricals = convert_categoricals
self._index = index
self._convert_missing = convert_missing
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
if encoding is not None:
if encoding not in VALID_ENCODINGS:
raise ValueError('Unknown encoding. Only latin-1 and ascii '
'supported.')
self._encoding = encoding
self._chunksize = chunksize
# State variables for the file
self._has_string_data = False
self._missing_values = False
self._can_read_value_labels = False
self._column_selector_set = False
self._value_labels_read = False
self._data_read = False
self._dtype = None
self._lines_read = 0
self._native_byteorder = _set_endianness(sys.byteorder)
if isinstance(path_or_buf, str):
path_or_buf, encoding, _ = get_filepath_or_buffer(
path_or_buf, encoding=self._default_encoding
)
if isinstance(path_or_buf, (str, compat.text_type, bytes)):
self.path_or_buf = open(path_or_buf, 'rb')
else:
# Copy to BytesIO, and ensure no encoding
contents = path_or_buf.read()
try:
contents = contents.encode(self._default_encoding)
except:
pass
self.path_or_buf = BytesIO(contents)
self._read_header()
def __enter__(self):
""" enter context manager """
return self
def __exit__(self, exc_type, exc_value, traceback):
""" exit context manager """
self.close()
def close(self):
""" close the handle if its open """
try:
self.path_or_buf.close()
except IOError:
pass
def _read_header(self):
first_char = self.path_or_buf.read(1)
if struct.unpack('c', first_char)[0] == b'<':
self._read_new_header(first_char)
else:
self._read_old_header(first_char)
self.has_string_data = len([x for x in self.typlist
if type(x) is int]) > 0
# calculate size of a data record
self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist)
# remove format details from %td
self.fmtlist = ["%td" if x.startswith("%td") else x
for x in self.fmtlist]
def _read_new_header(self, first_char):
# The first part of the header is common to 117 and 118.
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117, 118]:
raise ValueError(_version_error)
self.path_or_buf.read(21) # </release><byteorder>
        self.byteorder = self.path_or_buf.read(3) == b"MSF" and '>' or '<'
self.path_or_buf.read(15) # </byteorder><K>
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = self._get_nobs()
self.path_or_buf.read(11) # </N><label>
self.data_label = self._get_data_label()
self.path_or_buf.read(19) # </label><timestamp>
self.time_stamp = self._get_time_stamp()
self.path_or_buf.read(26) # </timestamp></header><map>
self.path_or_buf.read(8) # 0x0000000000000000
self.path_or_buf.read(8) # position of <map>
self._seek_vartypes = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 16
self._seek_varnames = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
self._seek_sortlist = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
self._seek_formats = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 9
self._seek_value_label_names = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 19
# Requires version-specific treatment
self._seek_variable_labels = self._get_seek_variable_labels()
self.path_or_buf.read(8) # <characteristics>
self.data_location = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 6
self.seek_strls = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 7
self.seek_value_labels = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 14
self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes)
self.path_or_buf.seek(self._seek_varnames)
self.varlist = self._get_varlist()
self.path_or_buf.seek(self._seek_sortlist)
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
self.path_or_buf.seek(self._seek_formats)
self.fmtlist = self._get_fmtlist()
self.path_or_buf.seek(self._seek_value_label_names)
self.lbllist = self._get_lbllist()
self.path_or_buf.seek(self._seek_variable_labels)
self._variable_labels = self._get_variable_labels()
# Get data type information, works for versions 117-118.
def _get_dtypes(self, seek_vartypes):
self.path_or_buf.seek(seek_vartypes)
raw_typlist = [struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
for i in range(self.nvar)]
def f(typ):
if typ <= 2045:
return typ
try:
return self.TYPE_MAP_XML[typ]
except KeyError:
raise ValueError("cannot convert stata types [{0}]".
format(typ))
typlist = [f(x) for x in raw_typlist]
def f(typ):
if typ <= 2045:
return str(typ)
try:
return self.DTYPE_MAP_XML[typ]
except KeyError:
raise ValueError("cannot convert stata dtype [{0}]"
.format(typ))
dtyplist = [f(x) for x in raw_typlist]
return typlist, dtyplist
def _get_varlist(self):
if self.format_version == 117:
b = 33
elif self.format_version == 118:
b = 129
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
# Returns the format list
def _get_fmtlist(self):
if self.format_version == 118:
b = 57
elif self.format_version > 113:
b = 49
elif self.format_version > 104:
b = 12
else:
b = 7
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
# Returns the label list
def _get_lbllist(self):
if self.format_version >= 118:
b = 129
elif self.format_version > 108:
b = 33
else:
b = 9
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
def _get_variable_labels(self):
if self.format_version == 118:
vlblist = [self._decode(self.path_or_buf.read(321))
for i in range(self.nvar)]
elif self.format_version > 105:
vlblist = [self._null_terminate(self.path_or_buf.read(81))
for i in range(self.nvar)]
else:
vlblist = [self._null_terminate(self.path_or_buf.read(32))
for i in range(self.nvar)]
return vlblist
def _get_nobs(self):
if self.format_version == 118:
return struct.unpack(self.byteorder + 'Q',
self.path_or_buf.read(8))[0]
else:
return struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
def _get_data_label(self):
if self.format_version == 118:
strlen = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version == 117:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self._null_terminate(self.path_or_buf.read(strlen))
elif self.format_version > 105:
return self._null_terminate(self.path_or_buf.read(81))
else:
return self._null_terminate(self.path_or_buf.read(32))
def _get_time_stamp(self):
if self.format_version == 118:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self.path_or_buf.read(strlen).decode("utf-8")
elif self.format_version == 117:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self._null_terminate(self.path_or_buf.read(strlen))
elif self.format_version > 104:
return self._null_terminate(self.path_or_buf.read(18))
else:
raise ValueError()
def _get_seek_variable_labels(self):
if self.format_version == 117:
            self.path_or_buf.read(8)  # <variable_labels>, throw away
# Stata 117 data files do not follow the described format. This is
# a work around that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
return self._seek_value_label_names + (33 * self.nvar) + 20 + 17
elif self.format_version == 118:
return struct.unpack(self.byteorder + 'q',
self.path_or_buf.read(8))[0] + 17
else:
raise ValueError()
def _read_old_header(self, first_char):
self.format_version = struct.unpack('b', first_char)[0]
if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:
raise ValueError(_version_error)
self.byteorder = struct.unpack('b', self.path_or_buf.read(1))[
0] == 0x1 and '>' or '<'
self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0]
self.path_or_buf.read(1) # unused
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.nobs = self._get_nobs()
self.data_label = self._get_data_label()
self.time_stamp = self._get_time_stamp()
# descriptors
if self.format_version > 108:
typlist = [ord(self.path_or_buf.read(1))
for i in range(self.nvar)]
else:
buf = self.path_or_buf.read(self.nvar)
typlistb = np.frombuffer(buf, dtype=np.uint8)
typlist = []
for tp in typlistb:
if tp in self.OLD_TYPE_MAPPING:
typlist.append(self.OLD_TYPE_MAPPING[tp])
else:
typlist.append(tp - 127) # py2 string, py3 bytes
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata types [{0}]"
.format(','.join(str(x) for x in typlist)))
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata dtypes [{0}]"
.format(','.join(str(x) for x in typlist)))
if self.format_version > 108:
self.varlist = [self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)]
else:
self.varlist = [self._null_terminate(self.path_or_buf.read(9))
for i in range(self.nvar)]
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
self.fmtlist = self._get_fmtlist()
self.lbllist = self._get_lbllist()
self._variable_labels = self._get_variable_labels()
# ignore expansion fields (Format 105 and later)
# When reading, read five bytes; the last four bytes now tell you
# the size of the next read, which you discard. You then continue
# like this until you read 5 bytes of zeros.
if self.format_version > 104:
while True:
data_type = struct.unpack(self.byteorder + 'b',
self.path_or_buf.read(1))[0]
if self.format_version > 108:
data_len = struct.unpack(self.byteorder + 'i',
self.path_or_buf.read(4))[0]
else:
data_len = struct.unpack(self.byteorder + 'h',
self.path_or_buf.read(2))[0]
if data_type == 0:
break
self.path_or_buf.read(data_len)
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
def _calcsize(self, fmt):
return (type(fmt) is int and fmt or
struct.calcsize(self.byteorder + fmt))
def _decode(self, s):
s = s.partition(b"\0")[0]
return s.decode('utf-8')
def _null_terminate(self, s):
if compat.PY3 or self._encoding is not None:
# have bytes not strings, so must decode
s = s.partition(b"\0")[0]
return s.decode(self._encoding or self._default_encoding)
else:
null_byte = "\0"
try:
return s.lstrip(null_byte)[:s.index(null_byte)]
except:
return s
def _read_value_labels(self):
if self.format_version <= 108:
# Value labels are not supported in version 108 and earlier.
return
if self._value_labels_read:
# Don't read twice
return
if self.format_version >= 117:
self.path_or_buf.seek(self.seek_value_labels)
else:
offset = self.nobs * self._dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
self._value_labels_read = True
self.value_label_dict = dict()
while True:
if self.format_version >= 117:
if self.path_or_buf.read(5) == b'</val': # <lbl>
break # end of value label table
slength = self.path_or_buf.read(4)
if not slength:
break # end of value label table (format < 117)
if self.format_version <= 117:
labname = self._null_terminate(self.path_or_buf.read(33))
else:
labname = self._decode(self.path_or_buf.read(129))
self.path_or_buf.read(3) # padding
n = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
txtlen = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
off = np.frombuffer(self.path_or_buf.read(4 * n),
dtype=self.byteorder + "i4",
count=n)
val = np.frombuffer(self.path_or_buf.read(4 * n),
dtype=self.byteorder + "i4",
count=n)
ii = np.argsort(off)
off = off[ii]
val = val[ii]
txt = self.path_or_buf.read(txtlen)
self.value_label_dict[labname] = dict()
for i in range(n):
end = off[i + 1] if i < n - 1 else txtlen
if self.format_version <= 117:
self.value_label_dict[labname][val[i]] = (
self._null_terminate(txt[off[i]:end]))
else:
self.value_label_dict[labname][val[i]] = (
self._decode(txt[off[i]:end]))
if self.format_version >= 117:
self.path_or_buf.read(6) # </lbl>
self._value_labels_read = True
def _read_strls(self):
self.path_or_buf.seek(self.seek_strls)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO = {'0': ''}
while True:
if self.path_or_buf.read(3) != b'GSO':
break
if self.format_version == 117:
v_o = struct.unpack(self.byteorder + 'Q',
self.path_or_buf.read(8))[0]
else:
buf = self.path_or_buf.read(12)
# Only tested on little endian file on little endian machine.
if self.byteorder == '<':
buf = buf[0:2] + buf[4:10]
else:
buf = buf[0:2] + buf[6:]
v_o = struct.unpack('Q', buf)[0]
typ = struct.unpack('B', self.path_or_buf.read(1))[0]
length = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
va = self.path_or_buf.read(length)
if typ == 130:
encoding = 'utf-8'
if self.format_version == 117:
encoding = self._encoding or self._default_encoding
va = va[0:-1].decode(encoding)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO[str(v_o)] = va
# legacy
@Appender('DEPRECATED: ' + _data_method_doc)
def data(self, **kwargs):
import warnings
warnings.warn("'data' is deprecated, use 'read' instead")
if self._data_read:
raise Exception("Data has already been read.")
self._data_read = True
return self.read(None, **kwargs)
def __next__(self):
return self.read(nrows=self._chunksize or 1)
def get_chunk(self, size=None):
"""
Reads lines from Stata file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, reads whole file.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
@Appender(_read_method_doc)
def read(self, nrows=None, convert_dates=None,
convert_categoricals=None, index=None,
convert_missing=None, preserve_dtypes=None,
columns=None, order_categoricals=None):
# Handle empty file or chunk. If reading incrementally raise
# StopIteration. If reading the whole thing return an empty
# data frame.
if (self.nobs == 0) and (nrows is None):
self._can_read_value_labels = True
self._data_read = True
self.close()
return DataFrame(columns=self.varlist)
# Handle options
if convert_dates is None:
convert_dates = self._convert_dates
if convert_categoricals is None:
convert_categoricals = self._convert_categoricals
if convert_missing is None:
convert_missing = self._convert_missing
if preserve_dtypes is None:
preserve_dtypes = self._preserve_dtypes
if columns is None:
columns = self._columns
if order_categoricals is None:
order_categoricals = self._order_categoricals
if nrows is None:
nrows = self.nobs
if (self.format_version >= 117) and (self._dtype is None):
self._can_read_value_labels = True
self._read_strls()
# Setup the dtype.
if self._dtype is None:
dtype = [] # Convert struct data types to numpy data type
for i, typ in enumerate(self.typlist):
if typ in self.NUMPY_TYPE_MAP:
dtype.append(('s' + str(i), self.byteorder +
self.NUMPY_TYPE_MAP[typ]))
else:
dtype.append(('s' + str(i), 'S' + str(typ)))
dtype = np.dtype(dtype)
self._dtype = dtype
# Read data
dtype = self._dtype
max_read_len = (self.nobs - self._lines_read) * dtype.itemsize
read_len = nrows * dtype.itemsize
read_len = min(read_len, max_read_len)
if read_len <= 0:
# Iterator has finished, should never be here unless
# we are reading the file incrementally
if convert_categoricals:
self._read_value_labels()
self.close()
raise StopIteration
offset = self._lines_read * dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
read_lines = min(nrows, self.nobs - self._lines_read)
data = np.frombuffer(self.path_or_buf.read(read_len), dtype=dtype,
count=read_lines)
self._lines_read += read_lines
if self._lines_read == self.nobs:
self._can_read_value_labels = True
self._data_read = True
# if necessary, swap the byte order to native here
if self.byteorder != self._native_byteorder:
data = data.byteswap().newbyteorder()
if convert_categoricals:
self._read_value_labels()
if len(data) == 0:
data = DataFrame(columns=self.varlist, index=index)
else:
data = DataFrame.from_records(data, index=index)
data.columns = self.varlist
# If index is not specified, use actual row number rather than
# restarting at 0 for each chunk.
if index is None:
ix = np.arange(self._lines_read - read_lines, self._lines_read)
data = data.set_index(ix)
if columns is not None:
try:
data = self._do_select_columns(data, columns)
except ValueError:
self.close()
raise
# Decode strings
for col, typ in zip(data, self.typlist):
if type(typ) is int:
data[col] = data[col].apply(
self._null_terminate, convert_dtype=True)
data = self._insert_strls(data)
cols_ = np.where(self.dtyplist)[0]
# Convert columns (if needed) to match input type
index = data.index
requires_type_conversion = False
data_formatted = []
for i in cols_:
if self.dtyplist[i] is not None:
col = data.columns[i]
dtype = data[col].dtype
if dtype != np.dtype(object) and dtype != self.dtyplist[i]:
requires_type_conversion = True
data_formatted.append(
(col, Series(data[col], index, self.dtyplist[i])))
else:
data_formatted.append((col, data[col]))
if requires_type_conversion:
data = DataFrame.from_items(data_formatted)
del data_formatted
self._do_convert_missing(data, convert_missing)
if convert_dates:
cols = np.where(lmap(lambda x: x in _date_formats,
self.fmtlist))[0]
for i in cols:
col = data.columns[i]
try:
data[col] = _stata_elapsed_date_to_datetime_vec(
data[col],
self.fmtlist[i])
except ValueError:
self.close()
raise
if convert_categoricals and self.format_version > 108:
data = self._do_convert_categoricals(data,
self.value_label_dict,
self.lbllist,
order_categoricals)
if not preserve_dtypes:
retyped_data = []
convert = False
for col in data:
dtype = data[col].dtype
if dtype in (np.float16, np.float32):
dtype = np.float64
convert = True
elif dtype in (np.int8, np.int16, np.int32):
dtype = np.int64
convert = True
retyped_data.append((col, data[col].astype(dtype)))
if convert:
data = DataFrame.from_items(retyped_data)
return data
def _do_convert_missing(self, data, convert_missing):
# Check for missing values, and replace if found
for i, colname in enumerate(data):
fmt = self.typlist[i]
if fmt not in self.VALID_RANGE:
continue
nmin, nmax = self.VALID_RANGE[fmt]
series = data[colname]
missing = np.logical_or(series < nmin, series > nmax)
if not missing.any():
continue
if convert_missing: # Replacement follows Stata notation
missing_loc = np.argwhere(missing)
umissing, umissing_loc = np.unique(series[missing],
return_inverse=True)
replacement = Series(series, dtype=np.object)
for j, um in enumerate(umissing):
missing_value = StataMissingValue(um)
loc = missing_loc[umissing_loc == j]
replacement.iloc[loc] = missing_value
else: # All replacements are identical
dtype = series.dtype
if dtype not in (np.float32, np.float64):
dtype = np.float64
replacement = Series(series, dtype=dtype)
replacement[missing] = np.nan
data[colname] = replacement
def _insert_strls(self, data):
if not hasattr(self, 'GSO') or len(self.GSO) == 0:
return data
for i, typ in enumerate(self.typlist):
if typ != 'Q':
continue
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
data.iloc[:, i] = [self.GSO[str(k)] for k in data.iloc[:, i]]
return data
def _do_select_columns(self, data, columns):
if not self._column_selector_set:
column_set = set(columns)
if len(column_set) != len(columns):
raise ValueError('columns contains duplicate entries')
unmatched = column_set.difference(data.columns)
if unmatched:
raise ValueError('The following columns were not found in the '
'Stata data set: ' +
', '.join(list(unmatched)))
# Copy information for retained columns for later processing
dtyplist = []
typlist = []
fmtlist = []
lbllist = []
for col in columns:
i = data.columns.get_loc(col)
dtyplist.append(self.dtyplist[i])
typlist.append(self.typlist[i])
fmtlist.append(self.fmtlist[i])
lbllist.append(self.lbllist[i])
self.dtyplist = dtyplist
self.typlist = typlist
self.fmtlist = fmtlist
self.lbllist = lbllist
self._column_selector_set = True
return data[columns]
def _do_convert_categoricals(self, data, value_label_dict, lbllist,
order_categoricals):
"""
Converts categorical columns to Categorical type.
"""
value_labels = list(compat.iterkeys(value_label_dict))
cat_converted_data = []
for col, label in zip(data, lbllist):
if label in value_labels:
# Explicit call with ordered=True
cat_data = Categorical(data[col], ordered=order_categoricals)
categories = []
for category in cat_data.categories:
if category in value_label_dict[label]:
categories.append(value_label_dict[label][category])
else:
categories.append(category) # Partially labeled
try:
cat_data.categories = categories
except ValueError:
vc = Series(categories).value_counts()
repeats = list(vc.index[vc > 1])
repeats = '\n' + '-' * 80 + '\n'.join(repeats)
msg = 'Value labels for column {0} are not unique. The ' \
'repeated labels are:\n{1}'.format(col, repeats)
raise ValueError(msg)
# TODO: is the next line needed above in the data(...) method?
cat_data = Series(cat_data, index=data.index)
cat_converted_data.append((col, cat_data))
else:
cat_converted_data.append((col, data[col]))
data = DataFrame.from_items(cat_converted_data)
return data
def data_label(self):
"""Returns data label of Stata file"""
return self.data_label
def variable_labels(self):
"""Returns variable labels as a dict, associating each variable name
with corresponding label
"""
return dict(zip(self.varlist, self._variable_labels))
def value_labels(self):
"""Returns a dict, associating each variable name a dict, associating
each value its corresponding label
"""
if not self._value_labels_read:
self._read_value_labels()
return self.value_label_dict
def _open_file_binary_write(fname, encoding):
if hasattr(fname, 'write'):
# if 'b' not in fname.mode:
return fname
return open(fname, "wb")
def _set_endianness(endianness):
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError("Endianness %s not understood" % endianness)
def _pad_bytes(name, length):
"""
    Takes a char string and pads it with null bytes until it is `length` characters long
"""
return name + "\x00" * (length - len(name))
def _convert_datetime_to_stata_type(fmt):
"""
Converts from one of the stata date formats to a type in TYPE_MAP
"""
if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq",
"%tq", "th", "%th", "ty", "%ty"]:
return np.float64 # Stata expects doubles for SIFs
else:
raise NotImplementedError("Format %s not implemented" % fmt)
def _maybe_convert_to_int_keys(convert_dates, varlist):
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key): convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError("convert_dates key must be a "
"column or an integer")
new_dict.update({key: convert_dates[key]})
return new_dict
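# Illustrative sketch (added for exposition; not part of the original module)
# of the normalisation performed by _maybe_convert_to_int_keys above: named
# columns are mapped to their position in varlist and '%' is prepended to bare
# format strings. The inputs below are hypothetical.
def _example_maybe_convert_to_int_keys():
    varlist = ['price', 'date']
    convert_dates = {'date': 'tw', 0: '%tc'}
    # expected result: {1: '%tw', 0: '%tc'}
    return _maybe_convert_to_int_keys(convert_dates, varlist)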
def _dtype_to_stata_type(dtype, column):
"""
    Converts dtype types to stata types. Returns the type byte (chr of the ordinal) written to the dta file.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
                         Pandas    Stata
    251 - chr(251) - for int8      byte
    252 - chr(252) - for int16     int
    253 - chr(253) - for int32     long
    254 - chr(254) - for float32   float
    255 - chr(255) - for double    double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if dtype.type == np.string_:
return chr(dtype.itemsize)
elif dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(_ensure_object(column.values))
return chr(max(itemsize, 1))
elif dtype == np.float64:
return chr(255)
elif dtype == np.float32:
return chr(254)
elif dtype == np.int32:
return chr(253)
elif dtype == np.int16:
return chr(252)
elif dtype == np.int8:
return chr(251)
else: # pragma : no cover
raise NotImplementedError("Data type %s not supported." % dtype)
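# Illustrative sketch (added for exposition; not part of the original module)
# of the dtype -> Stata type byte mapping implemented above: float64 maps to
# chr(255) (Stata double) and int8 maps to chr(251) (Stata byte).
def _example_dtype_to_stata_type():
    import numpy as np
    from pandas import Series
    float_col = Series([1.0, 2.0], dtype=np.float64)
    byte_col = Series([1, 2], dtype=np.int8)
    # expected result: (chr(255), chr(251))
    return (_dtype_to_stata_type(float_col.dtype, float_col),
            _dtype_to_stata_type(byte_col.dtype, byte_col))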
def _dtype_to_default_stata_fmt(dtype, column):
"""
Maps numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string. If not a string,
raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
"""
# TODO: Refactor to combine type with format
# TODO: expand this to handle a default datetime format?
if dtype.type == np.object_:
inferred_dtype = infer_dtype(column.dropna())
if not (inferred_dtype in ('string', 'unicode') or
len(column) == 0):
raise ValueError('Writing general object arrays is not supported')
itemsize = max_len_string_array(_ensure_object(column.values))
if itemsize > 244:
raise ValueError(excessive_string_length_error % column.name)
return "%" + str(max(itemsize, 1)) + "s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int32:
return "%12.0g"
elif dtype == np.int8 or dtype == np.int16:
return "%8.0g"
else: # pragma : no cover
raise NotImplementedError("Data type %s not supported." % dtype)
class StataWriter(StataParser):
"""
A class for writing Stata binary dta files
Parameters
----------
fname : str or buffer
String path of file-like object
data : DataFrame
Input to save
convert_dates : dict
Dictionary mapping columns containing datetime types to stata internal
        format to use when writing the dates. Options are 'tc', 'td', 'tm',
'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
Datetime columns that do not have a conversion type specified will be
converted to 'tc'. Raises NotImplementedError if a datetime column has
timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Only latin-1 and ascii are supported.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current time
dataset_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as values.
Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
Returns
-------
writer : StataWriter instance
The StataWriter instance has a write_file method, which will
write the file to the given `fname`.
Raises
------
NotImplementedError
* If datetimes contain timezone information
ValueError
        * Columns listed in convert_dates are neither datetime64[ns]
          nor datetime.datetime
* Column dtype is not representable in Stata
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
Examples
--------
>>> import pandas as pd
>>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> from datetime import datetime
>>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date'])
>>> writer = StataWriter('./date_data_file.dta', data, {'date' : 'tw'})
>>> writer.write_file()
"""
def __init__(self, fname, data, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
super(StataWriter, self).__init__(encoding)
self._convert_dates = {} if convert_dates is None else convert_dates
self._write_index = write_index
self._time_stamp = time_stamp
self._data_label = data_label
self._variable_labels = variable_labels
# attach nobs, nvars, data, varlist, typlist
self._prepare_pandas(data)
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
self._fname = fname
self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
def _write(self, to_write):
"""
Helper to call encode before writing to file for Python 3 compat.
"""
if compat.PY3:
self._file.write(to_write.encode(self._encoding or
self._default_encoding))
else:
self._file.write(to_write)
def _prepare_categoricals(self, data):
"""Check for categorical columns, retain categorical information for
Stata file and convert categorical data to int"""
is_cat = [is_categorical_dtype(data[col]) for col in data]
self._is_col_cat = is_cat
self._value_labels = []
if not any(is_cat):
return data
get_base_missing_value = StataMissingValue.get_base_missing_value
index = data.index
data_formatted = []
for col, col_is_cat in zip(data, is_cat):
if col_is_cat:
self._value_labels.append(StataValueLabel(data[col]))
dtype = data[col].cat.codes.dtype
if dtype == np.int64:
raise ValueError('It is not possible to export '
'int64-based categorical data to Stata.')
values = data[col].cat.codes.values.copy()
# Upcast if needed so that correct missing values can be set
if values.max() >= get_base_missing_value(dtype):
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
else:
dtype = np.float64
values = np.array(values, dtype=dtype)
# Replace missing values with Stata missing value for type
values[values == -1] = get_base_missing_value(dtype)
data_formatted.append((col, values, index))
else:
data_formatted.append((col, data[col]))
return DataFrame.from_items(data_formatted)
def _replace_nans(self, data):
# return data
"""Checks floating point data columns for nans, and replaces these with
the generic Stata for missing value (.)"""
for c in data:
dtype = data[c].dtype
if dtype in (np.float32, np.float64):
if dtype == np.float32:
replacement = self.MISSING_VALUES['f']
else:
replacement = self.MISSING_VALUES['d']
data[c] = data[c].fillna(replacement)
return data
def _check_column_names(self, data):
"""
Checks column names to ensure that they are valid Stata column names.
This includes checks for:
* Non-string names
* Stata keywords
* Variables that start with numbers
* Variables with names that are too long
When an illegal variable name is detected, it is converted, and if
dates are exported, the variable name is propagated to the date
conversion dictionary
"""
converted_names = []
columns = list(data.columns)
original_columns = columns[:]
duplicate_var_id = 0
for j, name in enumerate(columns):
orig_name = name
if not isinstance(name, string_types):
name = text_type(name)
for c in name:
if (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and \
(c < '0' or c > '9') and c != '_':
name = name.replace(c, '_')
# Variable name must not be a reserved word
if name in self.RESERVED_WORDS:
name = '_' + name
# Variable name may not start with a number
if name[0] >= '0' and name[0] <= '9':
name = '_' + name
name = name[:min(len(name), 32)]
if not name == orig_name:
# check for duplicates
while columns.count(name) > 0:
# prepend ascending number to avoid duplicates
name = '_' + str(duplicate_var_id) + name
name = name[:min(len(name), 32)]
duplicate_var_id += 1
# need to possibly encode the orig name if its unicode
try:
orig_name = orig_name.encode('utf-8')
except:
pass
converted_names.append(
'{0} -> {1}'.format(orig_name, name))
columns[j] = name
data.columns = columns
# Check date conversion, and fix key if needed
if self._convert_dates:
for c, o in zip(columns, original_columns):
if c != o:
self._convert_dates[c] = self._convert_dates[o]
del self._convert_dates[o]
if converted_names:
import warnings
ws = invalid_name_doc.format('\n '.join(converted_names))
warnings.warn(ws, InvalidColumnName)
return data
def _prepare_pandas(self, data):
# NOTE: we might need a different API / class for pandas objects so
# we can set different semantics - handle this with a PR to pandas.io
data = data.copy()
if self._write_index:
data = data.reset_index()
# Ensure column names are strings
data = self._check_column_names(data)
# Check columns for compatibility with stata, upcast if necessary
# Raise if outside the supported range
data = _cast_to_stata_types(data)
# Replace NaNs with Stata missing values
data = self._replace_nans(data)
# Convert categoricals to int data, and strip labels
data = self._prepare_categoricals(data)
self.nobs, self.nvar = data.shape
self.data = data
self.varlist = data.columns.tolist()
dtypes = data.dtypes
# Ensure all date columns are converted
for col in data:
if col in self._convert_dates:
continue
if is_datetime64_dtype(data[col]):
self._convert_dates[col] = 'tc'
self._convert_dates = _maybe_convert_to_int_keys(self._convert_dates,
self.varlist)
for key in self._convert_dates:
new_type = _convert_datetime_to_stata_type(
self._convert_dates[key]
)
dtypes[key] = np.dtype(new_type)
self.typlist = []
self.fmtlist = []
for col, dtype in dtypes.iteritems():
self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, data[col]))
self.typlist.append(_dtype_to_stata_type(dtype, data[col]))
# set the given format for the datetime cols
if self._convert_dates is not None:
for key in self._convert_dates:
self.fmtlist[key] = self._convert_dates[key]
def write_file(self):
self._file = _open_file_binary_write(
self._fname, self._encoding or self._default_encoding
)
try:
self._write_header(time_stamp=self._time_stamp,
data_label=self._data_label)
self._write_descriptors()
self._write_variable_labels()
# write 5 zeros for expansion fields
self._write(_pad_bytes("", 5))
self._prepare_data()
self._write_data()
self._write_value_labels()
finally:
self._file.close()
def _write_value_labels(self):
for vl in self._value_labels:
self._file.write(vl.generate_value_label(self._byteorder,
self._encoding))
def _write_header(self, data_label=None, time_stamp=None):
byteorder = self._byteorder
# ds_format - just use 114
self._file.write(struct.pack("b", 114))
# byteorder
self._write(byteorder == ">" and "\x01" or "\x02")
# filetype
self._write("\x01")
# unused
self._write("\x00")
# number of vars, 2 bytes
self._file.write(struct.pack(byteorder + "h", self.nvar)[:2])
# number of obs, 4 bytes
self._file.write(struct.pack(byteorder + "i", self.nobs)[:4])
# data label 81 bytes, char, null terminated
if data_label is None:
self._file.write(self._null_terminate(_pad_bytes("", 80)))
else:
self._file.write(
self._null_terminate(_pad_bytes(data_label[:80], 80))
)
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
# GH #13856
# Avoid locale-specific month conversion
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
'Sep', 'Oct', 'Nov', 'Dec']
month_lookup = {i + 1: month for i, month in enumerate(months)}
ts = (time_stamp.strftime("%d ") +
month_lookup[time_stamp.month] +
time_stamp.strftime(" %Y %H:%M"))
self._file.write(self._null_terminate(ts))
def _write_descriptors(self, typlist=None, varlist=None, srtlist=None,
fmtlist=None, lbllist=None):
nvar = self.nvar
# typlist, length nvar, format byte array
for typ in self.typlist:
self._write(typ)
# varlist names are checked by _check_column_names
# varlist, requires null terminated
for name in self.varlist:
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
# srtlist, 2*(nvar+1), int array, encoded by byteorder
srtlist = _pad_bytes("", 2 * (nvar + 1))
self._write(srtlist)
# fmtlist, 49*nvar, char array
for fmt in self.fmtlist:
self._write(_pad_bytes(fmt, 49))
# lbllist, 33*nvar, char array
for i in range(nvar):
# Use variable name when categorical
if self._is_col_cat[i]:
name = self.varlist[i]
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
else: # Default is empty label
self._write(_pad_bytes("", 33))
def _write_variable_labels(self):
# Missing labels are 80 blank characters plus null termination
blank = _pad_bytes('', 81)
if self._variable_labels is None:
for i in range(self.nvar):
self._write(blank)
return
for col in self.data:
if col in self._variable_labels:
label = self._variable_labels[col]
if len(label) > 80:
raise ValueError('Variable labels must be 80 characters '
'or fewer')
is_latin1 = all(ord(c) < 256 for c in label)
if not is_latin1:
raise ValueError('Variable labels must contain only '
'characters that can be encoded in '
'Latin-1')
self._write(_pad_bytes(label, 81))
else:
self._write(blank)
def _prepare_data(self):
data = self.data
typlist = self.typlist
convert_dates = self._convert_dates
# 1. Convert dates
if self._convert_dates is not None:
for i, col in enumerate(data):
if i in convert_dates:
data[col] = _datetime_to_stata_elapsed_vec(data[col],
self.fmtlist[i])
# 2. Convert bad string data to '' and pad to correct length
dtype = []
data_cols = []
has_strings = False
for i, col in enumerate(data):
typ = ord(typlist[i])
if typ <= 244:
has_strings = True
data[col] = data[col].fillna('').apply(_pad_bytes, args=(typ,))
stype = 'S%d' % typ
dtype.append(('c' + str(i), stype))
string = data[col].str.encode(self._encoding)
data_cols.append(string.values.astype(stype))
else:
dtype.append(('c' + str(i), data[col].dtype))
data_cols.append(data[col].values)
dtype = np.dtype(dtype)
if has_strings:
self.data = np.fromiter(zip(*data_cols), dtype=dtype)
else:
self.data = data.to_records(index=False)
def _write_data(self):
data = self.data
data.tofile(self._file)
def _null_terminate(self, s, as_string=False):
null_byte = '\x00'
if compat.PY3 and not as_string:
s += null_byte
return s.encode(self._encoding)
else:
s += null_byte
return s
|
agpl-3.0
|
pastas/pastas
|
pastas/solver.py
|
1
|
19775
|
"""
This module contains the different solvers that are available for Pastas.
All solvers inherit from the BaseSolver class, which contains general methods
for selecting the correct time series to misfit and options to weight the
residuals or noise series.
To solve a model the following syntax can be used:
>>> ml.solve(solver=ps.LeastSquares)
"""
from logging import getLogger
import numpy as np
from pandas import DataFrame
from scipy.linalg import svd
from scipy.optimize import least_squares
logger = getLogger(__name__)
class BaseSolver:
_name = "BaseSolver"
__doc__ = """All solver instances inherit from the BaseSolver class.
Attributes
----------
model: pastas.Model instance
pcor: pandas.DataFrame
Pandas DataFrame with the correlation between the optimized parameters.
pcov: pandas.DataFrame
        Pandas DataFrame with the covariances of the optimized parameters.
nfev: int
Number of times the model is called during optimization.
result: object
The object returned by the minimization method that is used. It depends
on the solver what is actually returned.
"""
def __init__(self, ml, pcov=None, nfev=None, obj_func=None, **kwargs):
self.ml = ml
self.pcov = pcov # Covariances of the parameters
if pcov is None:
self.pcor = None # Correlation between parameters
else:
self.pcor = self._get_correlations(pcov)
self.nfev = nfev # number of function evaluations
self.obj_func = obj_func
self.result = None # Object returned by the optimization method
def misfit(self, p, noise, weights=None, callback=None,
returnseparate=False):
"""This method is called by all solvers to obtain a series that are
minimized in the optimization process. It handles the application of
the weights, a noisemodel and other optimization options.
Parameters
----------
p: array_like
array_like object with the values as floats representing the
model parameters.
        noise: bool
            If True, the (weighted) noise series is minimized; if False, the
            residuals are minimized.
weights: pandas.Series, optional
pandas Series by which the residual or noise series are
multiplied. Typically values between 0 and 1.
callback: ufunc, optional
function that is called after each iteration. the parameters are
provided to the func. E.g. "callback(parameters)"
returnseparate: bool, optional
return residuals, noise, noiseweights
Returns
-------
rv:
residuals series (if noise=False) or noise series (if noise=True)
"""
# Get the residuals or the noise
if noise:
rv = self.ml.noise(p) * \
self.ml.noise_weights(p)
else:
rv = self.ml.residuals(p)
# Determine if weights need to be applied
if weights is not None:
weights = weights.reindex(rv.index)
weights.fillna(1.0, inplace=True)
rv = rv.multiply(weights)
if callback:
callback(p)
if returnseparate:
return self.ml.residuals(p).values, \
self.ml.noise(p).values, \
self.ml.noise_weights(p).values
return rv.values
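    @staticmethod
    def _example_apply_weights():
        """Illustrative sketch (added for exposition; not part of the original
        class) of how misfit applies weights: they are aligned to the residual
        index, missing weights default to 1.0, and the residuals are
        multiplied element-wise. The values below are hypothetical."""
        from pandas import Series
        rv = Series([1.0, -2.0, 0.5])
        weights = Series([0.5], index=[1]).reindex(rv.index).fillna(1.0)
        return rv.multiply(weights)  # -> [1.0, -1.0, 0.5]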
def prediction_interval(self, n=1000, alpha=0.05, **kwargs):
"""Method to calculate the prediction interval for the simulation.
Returns
-------
data : Pandas.DataFrame
DataFrame of length number of observations and two columns labeled
0.025 and 0.975 (numerical values) containing the 2.5% and 97.5%
prediction interval (for alpha=0.05)
Notes
-----
Add residuals assuming a Normal distribution with standard deviation
equal to the standard deviation of the residuals.
"""
sigr = self.ml.residuals().std()
data = self._get_realizations(func=self.ml.simulate, n=n, name=None,
**kwargs)
data = data + sigr * np.random.randn(data.shape[0], data.shape[1])
q = [alpha / 2, 1 - alpha / 2]
rv = data.quantile(q, axis=1).transpose()
return rv
def ci_simulation(self, n=1000, alpha=0.05, **kwargs):
"""Method to calculate the confidence interval for the simulation.
Returns
-------
Notes
-----
The confidence interval shows the uncertainty in the simulation due
to parameter uncertainty. In other words, there is a 95% probability
that the true best-fit line for the observed data lies within the
95% confidence interval.
"""
return self._get_confidence_interval(func=self.ml.simulate, n=n,
alpha=alpha, **kwargs)
def ci_block_response(self, name, n=1000, alpha=0.05, **kwargs):
dt = self.ml.get_block_response(name=name).index.values
return self._get_confidence_interval(func=self.ml.get_block_response,
n=n, alpha=alpha, name=name,
dt=dt, **kwargs)
def ci_step_response(self, name, n=1000, alpha=0.05, **kwargs):
dt = self.ml.get_block_response(name=name).index.values
return self._get_confidence_interval(func=self.ml.get_step_response,
n=n, alpha=alpha, name=name,
dt=dt, **kwargs)
def ci_contribution(self, name, n=1000, alpha=0.05, **kwargs):
return self._get_confidence_interval(func=self.ml.get_contribution,
n=n, alpha=alpha, name=name,
**kwargs)
def _get_realizations(self, func, n=None, name=None, **kwargs):
"""Internal method to obtain n number of parameter realizations."""
if name:
kwargs["name"] = name
parameter_sample = self._get_parameter_sample(n=n, name=name)
data = {}
for i, p in enumerate(parameter_sample):
data[i] = func(p=p, **kwargs)
return DataFrame.from_dict(data, orient="columns")
def _get_confidence_interval(self, func, n=None, name=None, alpha=0.05,
**kwargs):
"""Internal method to obtain a confidence interval."""
q = [alpha / 2, 1 - alpha / 2]
data = self._get_realizations(func=func, n=n, name=name, **kwargs)
return data.quantile(q=q, axis=1).transpose()
def _get_parameter_sample(self, name=None, n=None):
"""Internal method to obtain a parameter sets.
Parameters
----------
n: int, optional
            Number of random samples drawn from the multivariate normal
            distribution.
name: str, optional
Name of the stressmodel or model component to obtain the
parameters for.
Returns
-------
numpy.ndarray
Numpy array with N parameter samples.
"""
p = self.ml.get_parameters(name=name)
pcov = self._get_covariance_matrix(name=name)
if name is None:
parameters = self.ml.parameters
else:
parameters = self.ml.parameters.loc[
self.ml.parameters.name == name]
pmin = parameters.pmin.fillna(-np.inf).values
pmax = parameters.pmax.fillna(np.inf).values
if n is None:
# only use parameters that are varied.
n = int(10 ** parameters.vary.sum())
samples = np.zeros((0, p.size))
# Start truncated multivariate sampling
it = 0
while samples.shape[0] < n:
s = np.random.multivariate_normal(p, pcov, size=(n,),
check_valid="ignore")
accept = s[(np.min(s - pmin, axis=1) >= 0) &
(np.max(s - pmax, axis=1) <= 0)]
samples = np.concatenate((samples, accept), axis=0)
# Make sure there's no endless while loop
if it > 10:
break
else:
it += 1
return samples[:n, :]
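    @staticmethod
    def _example_truncated_sampling(n=100):
        """Standalone sketch (added for exposition; not part of the original
        class) of the rejection sampling used in _get_parameter_sample above:
        multivariate-normal draws outside the parameter bounds are discarded
        until n samples have been accepted. All values are hypothetical."""
        p = np.array([1.0, 0.5])
        pcov = np.array([[0.04, 0.0], [0.0, 0.01]])
        pmin = np.array([0.0, 0.0])
        pmax = np.array([2.0, 1.0])
        samples = np.zeros((0, p.size))
        while samples.shape[0] < n:
            s = np.random.multivariate_normal(p, pcov, size=(n,))
            accept = s[(np.min(s - pmin, axis=1) >= 0) &
                       (np.max(s - pmax, axis=1) <= 0)]
            samples = np.concatenate((samples, accept), axis=0)
        return samples[:n, :]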
def _get_covariance_matrix(self, name=None):
"""Internal method to obtain the covariance matrix from the model.
Parameters
----------
name: str, optional
Name of the stressmodel or model component to obtain the
parameters for.
Returns
-------
pcov: pandas.DataFrame
Pandas DataFrame with the covariances for the parameters.
"""
if name:
index = self.ml.parameters.loc[self.ml.parameters.loc[:,
"name"] == name].index
else:
index = self.ml.parameters.index
pcov = self.pcov.reindex(index=index, columns=index).fillna(0)
return pcov
@staticmethod
def _get_correlations(pcov):
"""Internal method to obtain the parameter correlations from the
covariance matrix.
Parameters
----------
pcov: pandas.DataFrame
n x n Pandas DataFrame with the covariances.
Returns
-------
pcor: pandas.DataFrame
n x n Pandas DataFrame with the correlations.
"""
index = pcov.index
pcov = pcov.to_numpy()
v = np.sqrt(np.diag(pcov))
with np.errstate(divide='ignore', invalid='ignore'):
corr = pcov / np.outer(v, v)
corr[pcov == 0] = 0
pcor = DataFrame(data=corr, index=index, columns=index)
return pcor
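    @staticmethod
    def _example_cov_to_corr():
        """Small numeric illustration (added for exposition) of the
        covariance-to-correlation conversion in _get_correlations above.
        The covariance values below are hypothetical."""
        pcov = DataFrame([[4.0, 2.0], [2.0, 9.0]],
                         index=['a', 'b'], columns=['a', 'b'])
        # off-diagonal: 2 / (sqrt(4) * sqrt(9)) = 1/3, diagonal: 1.0
        return BaseSolver._get_correlations(pcov)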
def to_dict(self):
data = {
"name": self._name,
"pcov": self.pcov,
"nfev": self.nfev,
"obj_func": self.obj_func
}
return data
class LeastSquares(BaseSolver):
_name = "LeastSquares"
def __init__(self, ml, pcov=None, nfev=None, **kwargs):
"""Solver based on Scipy's least_squares method [scipy_ref]_.
Notes
-----
        This class is the default solver used by the pastas Model.solve()
        method. All kwargs provided to the Model.solve() method are forwarded
        to this solver and, from there, to SciPy's least_squares solver.
Examples
--------
>>> ml.solve(solver=ps.LeastSquares)
References
----------
.. [scipy_ref] https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html
"""
BaseSolver.__init__(self, ml=ml, pcov=pcov, nfev=nfev, **kwargs)
def solve(self, noise=True, weights=None, callback=None, **kwargs):
self.vary = self.ml.parameters.vary.values.astype(bool)
self.initial = self.ml.parameters.initial.values.copy()
parameters = self.ml.parameters.loc[self.vary]
# Set the boundaries
bounds = (np.where(parameters.pmin.isnull(), -np.inf, parameters.pmin),
np.where(parameters.pmax.isnull(), np.inf, parameters.pmax))
self.result = least_squares(self.objfunction, bounds=bounds,
x0=parameters.initial.values,
args=(noise, weights, callback), **kwargs)
self.pcov = DataFrame(self._get_covariances(self.result.jac,
self.result.cost),
index=parameters.index, columns=parameters.index)
self.pcor = self._get_correlations(self.pcov)
self.nfev = self.result.nfev
self.obj_func = self.result.cost
# Prepare return values
success = self.result.success
optimal = self.initial
optimal[self.vary] = self.result.x
stderr = np.zeros(len(optimal)) * np.nan
stderr[self.vary] = np.sqrt(np.diag(self.pcov))
return success, optimal, stderr
def objfunction(self, p, noise, weights, callback):
par = self.initial
par[self.vary] = p
return self.misfit(p=par, noise=noise, weights=weights,
callback=callback)
def _get_covariances(self, jacobian, cost, absolute_sigma=False):
"""Internal method to get the covariance matrix from the jacobian.
Parameters
----------
jacobian: numpy.ndarray
cost: float
absolute_sigma: bool
Default is False
Returns
-------
pcov: numpy.array
numpy array with the covariance matrix.
Notes
-----
This method is copied from Scipy, please refer to:
https://github.com/scipy/scipy/blob/v1.0.0/scipy/optimize/optimize.py
"""
cost = 2 * cost # res.cost is half sum of squares!
# Do Moore-Penrose inverse discarding zero singular values.
_, s, VT = svd(jacobian, full_matrices=False)
threshold = np.finfo(float).eps * max(jacobian.shape) * s[0]
s = s[s > threshold]
VT = VT[:s.size]
pcov = np.dot(VT.T / s ** 2, VT)
n_param = self.ml.parameters.index.size
warn_cov = False
if pcov is None:
# indeterminate covariance
pcov = np.zeros((n_param, n_param), dtype=float)
pcov.fill(np.inf)
warn_cov = True
elif not absolute_sigma:
if self.ml.oseries.series.index.size > n_param:
s_sq = cost / (self.ml.oseries.series.index.size - n_param)
pcov = pcov * s_sq
else:
pcov.fill(np.inf)
warn_cov = True
if warn_cov:
logger.warning(
'Covariance of the parameters could not be estimated')
return pcov
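# Standalone sketch (added for exposition; not part of the original module) of
# the SVD-based pseudo-inverse used in LeastSquares._get_covariances above: for
# a well-conditioned Jacobian it reproduces inv(J.T @ J). The Jacobian below is
# hypothetical.
def _example_pcov_from_jacobian():
    jacobian = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])
    _, s, VT = svd(jacobian, full_matrices=False)
    threshold = np.finfo(float).eps * max(jacobian.shape) * s[0]
    s = s[s > threshold]
    VT = VT[:s.size]
    return np.dot(VT.T / s ** 2, VT)  # equals inv(jacobian.T @ jacobian) here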
class LmfitSolve(BaseSolver):
_name = "LmfitSolve"
def __init__(self, ml, pcov=None, nfev=None, **kwargs):
"""Solving the model using the LmFit solver [LM]_.
        This is essentially a wrapper around the SciPy solvers, adding
        extra functionality for handling boundary conditions.
References
----------
.. [LM] https://github.com/lmfit/lmfit-py/
"""
try:
global lmfit
import lmfit as lmfit # Import Lmfit here, so it is no dependency
except ImportError:
msg = "lmfit not installed. Please install lmfit first."
raise ImportError(msg)
BaseSolver.__init__(self, ml=ml, pcov=pcov, nfev=nfev, **kwargs)
def solve(self, noise=True, weights=None, callback=None, method="leastsq",
**kwargs):
# Deal with the parameters
parameters = lmfit.Parameters()
p = self.ml.parameters.loc[:, ['initial', 'pmin', 'pmax', 'vary']]
for k in p.index:
pp = np.where(p.loc[k].isnull(), None, p.loc[k])
parameters.add(k, value=pp[0], min=pp[1], max=pp[2], vary=pp[3])
# Create the Minimizer object and minimize
self.mini = lmfit.Minimizer(userfcn=self.objfunction, calc_covar=True,
fcn_args=(noise, weights, callback),
params=parameters, **kwargs)
self.result = self.mini.minimize(method=method)
# Set all parameter attributes
pcov = None
if hasattr(self.result, "covar"):
if self.result.covar is not None:
pcov = self.result.covar
names = self.result.var_names
self.pcov = DataFrame(pcov, index=names, columns=names, dtype=float)
self.pcor = self._get_correlations(self.pcov)
# Set all optimization attributes
self.nfev = self.result.nfev
self.obj_func = self.result.chisqr
if hasattr(self.result, "success"):
success = self.result.success
else:
success = True
optimal = np.array([p.value for p in self.result.params.values()])
stderr = np.array([p.stderr for p in self.result.params.values()])
idx = None
if "is_weighted" in kwargs:
if not kwargs["is_weighted"]:
idx = -1
return success, optimal[:idx], stderr[:idx]
def objfunction(self, parameters, noise, weights, callback):
p = np.array([p.value for p in parameters.values()])
return self.misfit(p=p, noise=noise, weights=weights,
callback=callback)
class LmfitSolveNew(BaseSolver):
_name = "LmfitSolve"
def __init__(self, ml, pcov=None, nfev=None, **kwargs):
"""Solving the model using the LmFit solver [LM]_.
        This is essentially a wrapper around the SciPy solvers, adding
        extra functionality for handling boundary conditions.
References
----------
.. [LM] https://github.com/lmfit/lmfit-py/
"""
try:
global lmfit
import lmfit as lmfit # Import Lmfit here, so it is no dependency
except ImportError:
msg = "lmfit not installed. Please install lmfit first."
raise ImportError(msg)
BaseSolver.__init__(self, ml=ml, pcov=pcov, nfev=nfev, **kwargs)
def solve(self, noise=True, weights=None, callback=None, method="leastsq",
**kwargs):
# Deal with the parameters
parameters = lmfit.Parameters()
p = self.ml.parameters.loc[:, ['initial', 'pmin', 'pmax', 'vary']]
for k in p.index:
pp = np.where(p.loc[k].isnull(), None, p.loc[k])
parameters.add(k, value=pp[0], min=pp[1], max=pp[2], vary=pp[3])
# Create the Minimizer object and minimize
self.mini = lmfit.Minimizer(userfcn=self.objfunction, calc_covar=True,
fcn_args=(noise, weights, callback),
params=parameters, **kwargs)
self.result = self.mini.minimize(method=method)
# Set all parameter attributes
pcov = None
if hasattr(self.result, "covar"):
if self.result.covar is not None:
pcov = self.result.covar
names = self.result.var_names
self.pcov = DataFrame(pcov, index=names, columns=names)
self.pcor = self._get_correlations(self.pcov)
# Set all optimization attributes
self.nfev = self.result.nfev
self.obj_func = self.result.chisqr
if hasattr(self.result, "success"):
success = self.result.success
else:
success = True
optimal = np.array([p.value for p in self.result.params.values()])
stderr = np.array([p.stderr for p in self.result.params.values()])
idx = None
if "is_weighted" in kwargs:
if not kwargs["is_weighted"]:
idx = -1
return success, optimal[:idx], stderr[:idx]
def objfunction(self, parameters, noise, weights, callback):
param = np.array([p.value for p in parameters.values()])
res, noise, weights = self.misfit(param, noise, weights, callback,
returnseparate=True)
var_res = np.var(res, ddof=1)
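        # Added note (not in the original): the objective below combines a
        # weighted sum of squared noise terms, scaled by the residual
        # variance, with a log-variance penalty, giving a likelihood-style
        # criterion rather than a plain sum of squares.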
weighted_noise = noise * weights
extraterm = np.sum(np.log(var_res / weights ** 2))
rv = np.sum(weighted_noise ** 2) / var_res + extraterm
return rv
class MonteCarlo(BaseSolver):
_name = "MonteCarlo"
def __init__(self, ml, pcov=None, nfev=None, **kwargs):
BaseSolver.__init__(self, ml=ml, pcov=pcov, nfev=nfev, **kwargs)
def solve(self):
optimal = None
stderr = None
success = True
return success, optimal, stderr
|
mit
|
meduz/scikit-learn
|
sklearn/utils/sparsetools/tests/test_traversal.py
|
38
|
2018
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
try:
from scipy.sparse.csgraph import breadth_first_tree, depth_first_tree,\
csgraph_to_dense, csgraph_from_dense
except ImportError:
# Oldish versions of scipy don't have that
csgraph_from_dense = None
def test_graph_breadth_first():
if csgraph_from_dense is None:
raise SkipTest("Old version of scipy, doesn't have csgraph.")
csgraph = np.array([[0, 1, 2, 0, 0],
[1, 0, 0, 0, 3],
[2, 0, 0, 7, 0],
[0, 0, 7, 0, 1],
[0, 3, 0, 1, 0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
bfirst = np.array([[0, 1, 2, 0, 0],
[0, 0, 0, 0, 3],
[0, 0, 0, 7, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
for directed in [True, False]:
bfirst_test = breadth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(bfirst_test),
bfirst)
def test_graph_depth_first():
if csgraph_from_dense is None:
raise SkipTest("Old version of scipy, doesn't have csgraph.")
csgraph = np.array([[0, 1, 2, 0, 0],
[1, 0, 0, 0, 3],
[2, 0, 0, 7, 0],
[0, 0, 7, 0, 1],
[0, 3, 0, 1, 0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
dfirst = np.array([[0, 1, 0, 0, 0],
[0, 0, 0, 0, 3],
[0, 0, 0, 0, 0],
[0, 0, 7, 0, 0],
[0, 0, 0, 1, 0]])
for directed in [True, False]:
dfirst_test = depth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(dfirst_test),
dfirst)
|
bsd-3-clause
|
pelson/cartopy
|
docs/source/conf.py
|
2
|
13753
|
# (C) British Crown Copyright 2011 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
# -*- coding: utf-8 -*-
#
# cartopy documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 16 09:41:05 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.6'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'cartopy.sphinxext.summarise_package',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.extlinks',
# 'sphinx.ext.autosummary',
# 'sphinxcontrib.napoleon', (Needs work before this can
# be enabled.)
# 'matplotlib.sphinxext.plot_directive'
# We use a local copy of the plot_directive until
# https://github.com/matplotlib/matplotlib/pull/6213 is
# available in order
# to benefit from cached rebuilds of plots.
'sphinxext.plot_directive',
# Monkey-patch sphinx_gallery to handle cartopy's __tags__
# example convention.
'sphinxext.pre_sphinx_gallery',
'sphinx_gallery.gen_gallery',
'sphinx.ext.napoleon'
]
import matplotlib
matplotlib.use('Agg')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cartopy'
copyright = u'2011 - 2018 British Crown Copyright' # the template will need
# updating if this is changed
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import cartopy
version = cartopy.__version__
# The full version, including alpha/beta/rc tags.
release = cartopy.__version__
# Sphinx gallery configuration
from sphinx_gallery.sorting import ExampleTitleSortKey
sphinx_gallery_conf = {
'examples_dirs': ['../../lib/cartopy/examples'],
'filename_pattern': '^((?!sgskip).)*$',
'gallery_dirs': ['gallery'],
'within_subsection_order': ExampleTitleSortKey,
'doc_module': ('cartopy',),
'reference_url': {'cartopy': None},
'backreferences_dir': '../build/backrefs',
}
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cartopydoc'
html_context = {'rellinks': [('genindex', 'General Index', 'I', 'index'),
('cartopy_outline', 'Module outline', 'O',
'outline')]}
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'cartopy.tex', u'Cartopy Introduction',
u'Philip Elson, Richard Hattersley', 'manual', False),
('introductory_examples/index', 'cartopy_examples.tex', u'Cartopy examples',
u'Philip Elson, Richard Hattersley', 'manual', True)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cartopy', u'cartopy Documentation',
[u'Philip Elson, Richard Hattersley'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cartopy', u'cartopy Documentation',
u'Philip Elson, Richard Hattersley', 'cartopy',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'cartopy'
epub_author = u'Philip Elson, Richard Hattersley'
epub_publisher = u'Philip Elson, Richard Hattersley'
epub_copyright = u'2012, Philip Elson, Richard Hattersley'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/3', None),
'matplotlib': ('https://matplotlib.org', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'shapely': ('http://toblerity.org/shapely', None), }
############ extlinks extension ############
extlinks = {'issues': ('https://github.com/SciTools/cartopy/labels/%s',
'issues labeled with '),
'issue': ('https://github.com/SciTools/cartopy/issues/%s',
'Issue #'),
'pull': ('https://github.com/SciTools/cartopy/pull/%s', 'PR #'),
}
############ package summary extension ###########
summarise_package_names = ['cartopy']
summarise_package_exclude_directories = [['tests', 'examples', 'sphinxext']]
summarise_package_fnames = ['cartopy_outline.rst']
############ plot directive ##############
plot_html_show_formats = False
# plot_rcparams = {'figure.autolayout': True}
plot_rcparams = {'figure.subplot.bottom': 0.04,
'figure.subplot.top': 0.96,
'figure.subplot.left': 0.04,
'figure.subplot.right': 0.96}
plot_formats = [('thumb.png', 20),
'png',
'pdf'
]
############ autodoc config ##############
autoclass_content = 'both'
# Napoleon settings
# napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = False
# # Numpydoc settings
# numpydoc_show_class_members = True
# numpydoc_show_inherited_class_members = False
# numpydoc_class_members_toctree = False
# numpydoc_use_blockquotes = True
|
lgpl-3.0
|
PECOS-KNL/kernels
|
src/gp/results/plot_membind_scaling.py
|
1
|
1541
|
import numpy as np
import matplotlib
matplotlib.use('pdf')
from matplotlib import pyplot as plt
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
rcParams['font.serif'] = 'Computer Modern Roman'
rcParams['text.usetex'] = True
from matplotlib.ticker import LogLocator, NullLocator
from matplotlib.ticker import LogFormatter
membind_0_data = np.loadtxt('out.membind.0.txt', skiprows=1)
membind_1_data = np.loadtxt('out.membind.1.txt', skiprows=1)
xeon_data = np.loadtxt('E5-2670v3_out.txt', skiprows=1)
fig = plt.figure(figsize=(5.5,4))
ax = fig.add_subplot(1, 1, 1)
locator = LogLocator(2)
formatter = LogFormatter(2)
threads = [1, 2, 4, 8, 16, 32, 64, 128, 256]
xeon_threads = xeon_data[:,0]
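# Ideal (linear) scaling reference computed below: the single-thread time is
# halved for every doubling of the thread count.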
ideal = np.zeros(membind_0_data.shape[0])
ideal[0] = membind_0_data[0,0] + membind_0_data[0,1]
for i in range(1, ideal.shape[0]):
ideal[i] = ideal[i-1] / 2.0
ax.loglog(threads, membind_0_data[:,0] + membind_0_data[:,1], 'b-o', label='membind=0')
ax.loglog(threads, membind_1_data[:,0] + membind_1_data[:,1], 'r-d', label='membind=1')
ax.loglog(xeon_threads, xeon_data[:,2] + xeon_data[:,3], 'g-s', label='Xeon E5-2670v3')
ax.loglog(threads, ideal, 'k--', label='Linear Scalability')
ax.set_xlim(0.51, 500)
ax.set_xlabel('OpenMP threads')
ax.set_ylabel('Elapsed time (sec)')
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_minor_locator(NullLocator())
ax.xaxis.set_major_formatter(formatter)
ax.grid(False)
ax.legend(frameon=False, numpoints=1)
fig.tight_layout()
fig.savefig('gp_membind_scaling_collapse.pdf')
ax.cla()
|
lgpl-2.1
|
PhenixI/machine-learning
|
10-Compressing_Data/LDA/LDA_python/LDA.py
|
1
|
1243
|
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
__author__ = 'PhenixI'
def read_data():
f=open("data_iris.txt",'r')
lines=[line.strip() for line in f.readlines()]
f.close()
lines= [line.split(",") for line in lines if line]
class1=np.array([line[:4] for line in lines if line[-1]=="Iris-setosa"],dtype=np.float)
class2=np.array([line[:4] for line in lines if line[-1]!="Iris-setosa"],dtype=np.float)
return class1,class2
def main():
class1,class2=read_data()
    mean1=np.mean(class1,axis=0)  # mean (original centre) of class 1, m1
    mean2=np.mean(class2,axis=0)  # mean (original centre) of class 2, m2
#calculate variance within class
    Sw = np.dot((class1-mean1).T,(class1-mean1))+np.dot((class2-mean2).T,(class2-mean2))  # Sw, the within-class scatter (summed over both classes)
    # calculate weights which maximize linear separation: solve for w (see "LDA introduction simple" for the derivation)
w=np.dot(np.linalg.inv(Sw),(mean2-mean1))
print "vector of max weights",w
#projection of classes on 1D space
plt.plot(np.dot(class1,w),[0]*class1.shape[0],"bo",label="Iris-setosa")
plt.plot(np.dot(class2,w),[0]*class2.shape[0],"go",label="Iris-versicolor and Iris-virginica")
plt.legend()
plt.show()
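# Optional helper added for exposition (not part of the original script): the
# weight vector computed in main() is the Fisher discriminant direction
# w = inv(Sw) * (m2 - m1), which maximises between-class scatter relative to
# within-class scatter. This standalone version works on any two class arrays.
def fisher_direction(class1, class2):
    m1 = np.mean(class1, axis=0)
    m2 = np.mean(class2, axis=0)
    Sw = np.dot((class1 - m1).T, (class1 - m1)) + \
         np.dot((class2 - m2).T, (class2 - m2))
    return np.dot(np.linalg.inv(Sw), (m2 - m1))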
main()
|
gpl-2.0
|
cython-testbed/pandas
|
scripts/tests/test_validate_docstrings.py
|
1
|
18150
|
import string
import random
import io
import pytest
import numpy as np
import validate_docstrings
validate_one = validate_docstrings.validate_one
from pandas.util.testing import capture_stderr
class GoodDocStrings(object):
"""
Collection of good doc strings.
This class contains a lot of docstrings that should pass the validation
script without any errors.
"""
def plot(self, kind, color='blue', **kwargs):
"""
Generate a plot.
Render the data in the Series as a matplotlib plot of the
specified kind.
Parameters
----------
kind : str
Kind of matplotlib plot.
color : str, default 'blue'
Color name or rgb code.
**kwargs
These parameters will be passed to the matplotlib plotting
function.
"""
pass
def sample(self):
"""
Generate and return a random number.
The value is sampled from a continuous uniform distribution between
0 and 1.
Returns
-------
float
Random number generated.
"""
return random.random()
def random_letters(self):
"""
Generate and return a sequence of random letters.
The length of the returned string is also random, and is also
returned.
Returns
-------
length : int
Length of the returned string.
letters : str
String of random letters.
"""
length = random.randint(1, 10)
letters = "".join(random.sample(string.ascii_lowercase, length))
return length, letters
def sample_values(self):
"""
Generate an infinite sequence of random numbers.
The values are sampled from a continuous uniform distribution between
0 and 1.
Yields
------
float
Random number generated.
"""
while True:
yield random.random()
def head(self):
"""
Return the first 5 elements of the Series.
This function is mainly useful to preview the values of the
Series without displaying the whole of it.
Returns
-------
Series
Subset of the original series with the 5 first values.
See Also
--------
Series.tail : Return the last 5 elements of the Series.
Series.iloc : Return a slice of the elements in the Series,
which can also be used to return the first or last n.
"""
return self.iloc[:5]
def head1(self, n=5):
"""
Return the first elements of the Series.
This function is mainly useful to preview the values of the
Series without displaying the whole of it.
Parameters
----------
n : int
Number of values to return.
Returns
-------
Series
Subset of the original series with the n first values.
See Also
--------
tail : Return the last n elements of the Series.
Examples
--------
>>> s = pd.Series(['Ant', 'Bear', 'Cow', 'Dog', 'Falcon'])
>>> s.head()
0 Ant
1 Bear
2 Cow
3 Dog
4 Falcon
dtype: object
With the `n` parameter, we can change the number of returned rows:
>>> s.head(n=3)
0 Ant
1 Bear
2 Cow
dtype: object
"""
return self.iloc[:n]
def contains(self, pat, case=True, na=np.nan):
"""
Return whether each value contains `pat`.
In this case, we are illustrating how to use sections, even
if the example is simple enough and does not require them.
Parameters
----------
pat : str
Pattern to check for within each element.
case : bool, default True
Whether check should be done with case sensitivity.
na : object, default np.nan
Fill value for missing data.
Examples
--------
>>> s = pd.Series(['Antelope', 'Lion', 'Zebra', np.nan])
>>> s.str.contains(pat='a')
0 False
1 False
2 True
3 NaN
dtype: object
**Case sensitivity**
        With `case` set to `False` we can match `a` with both
`a` and `A`:
>>> s.str.contains(pat='a', case=False)
0 True
1 False
2 True
3 NaN
dtype: object
**Missing values**
We can fill missing values in the output using the `na` parameter:
>>> s.str.contains(pat='a', na=False)
0 False
1 False
2 True
3 False
dtype: bool
"""
pass
def mode(self, axis, numeric_only):
"""
Ensure sphinx directives don't affect checks for trailing periods.
Parameters
----------
axis : str
Sentence ending in period, followed by single directive.
.. versionchanged:: 0.1.2
numeric_only : boolean
Sentence ending in period, followed by multiple directives.
.. versionadded:: 0.1.2
.. deprecated:: 0.00.0
A multiline description,
which spans another line.
"""
pass
class BadGenericDocStrings(object):
"""Everything here has a bad docstring
"""
def func(self):
"""Some function.
With several mistakes in the docstring.
        It has a blank line after the signature `def func():`.
The text 'Some function' should go in the line after the
opening quotes of the docstring, not in the same line.
There is a blank line between the docstring and the first line
of code `foo = 1`.
The closing quotes should be in the next line, not in this one."""
foo = 1
bar = 2
return foo + bar
def astype(self, dtype):
"""
Casts Series type.
Verb in third-person of the present simple, should be infinitive.
"""
pass
def astype1(self, dtype):
"""
Method to cast Series type.
Does not start with verb.
"""
pass
def astype2(self, dtype):
"""
Cast Series type
Missing dot at the end.
"""
pass
def astype3(self, dtype):
"""
Cast Series type from its current type to the new type defined in
the parameter dtype.
Summary is too verbose and doesn't fit in a single line.
"""
pass
def plot(self, kind, **kwargs):
"""
Generate a plot.
Render the data in the Series as a matplotlib plot of the
specified kind.
Note the blank line between the parameters title and the first
parameter. Also, note that after the name of the parameter `kind`
and before the colon, a space is missing.
Also, note that the parameter descriptions do not start with a
capital letter, and do not finish with a dot.
Finally, the `**kwargs` parameter is missing.
Parameters
----------
kind: str
kind of matplotlib plot
"""
pass
def method(self, foo=None, bar=None):
"""
A sample DataFrame method.
Do not import numpy and pandas.
Try to use meaningful data, when it makes the example easier
to understand.
Try to avoid positional arguments like in `df.method(1)`. They
can be alright if previously defined with a meaningful name,
like in `present_value(interest_rate)`, but avoid them otherwise.
When presenting the behavior with different parameters, do not place
all the calls one next to the other. Instead, add a short sentence
explaining what the example shows.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> df = pd.DataFrame(np.ones((3, 3)),
... columns=('a', 'b', 'c'))
>>> df.all(1)
0 True
1 True
2 True
dtype: bool
>>> df.all(bool_only=True)
Series([], dtype: bool)
"""
pass
class BadSummaries(object):
def wrong_line(self):
"""Exists on the wrong line"""
pass
def no_punctuation(self):
"""
Has the right line but forgets punctuation
"""
pass
def no_capitalization(self):
"""
provides a lowercase summary.
"""
pass
def no_infinitive(self):
"""
Started with a verb that is not infinitive.
"""
def multi_line(self):
"""
Extends beyond one line
which is not correct.
"""
def two_paragraph_multi_line(self):
"""
Extends beyond one line
which is not correct.
Extends beyond one line, which in itself is correct but the
previous short summary should still be an issue.
"""
class BadParameters(object):
"""
Everything here has a problem with its Parameters section.
"""
def missing_params(self, kind, **kwargs):
"""
Lacks kwargs in Parameters.
Parameters
----------
kind : str
Foo bar baz.
"""
def bad_colon_spacing(self, kind):
"""
Has bad spacing in the type line.
Parameters
----------
kind: str
Needs a space after kind.
"""
def no_description_period(self, kind):
"""
Forgets to add a period to the description.
Parameters
----------
kind : str
Doesn't end with a dot
"""
def no_description_period_with_directive(self, kind):
"""
Forgets to add a period, and also includes a directive.
Parameters
----------
kind : str
Doesn't end with a dot
.. versionadded:: 0.00.0
"""
def no_description_period_with_directives(self, kind):
"""
Forgets to add a period, and also includes multiple directives.
Parameters
----------
kind : str
Doesn't end with a dot
.. versionchanged:: 0.00.0
.. deprecated:: 0.00.0
"""
def parameter_capitalization(self, kind):
"""
Forgets to capitalize the description.
Parameters
----------
kind : str
this is not capitalized.
"""
def blank_lines(self, kind):
"""
Adds a blank line after the section header.
Parameters
----------
kind : str
Foo bar baz.
"""
pass
class BadReturns(object):
def return_not_documented(self):
"""
Lacks section for Returns
"""
return "Hello world!"
def yield_not_documented(self):
"""
Lacks section for Yields
"""
yield "Hello world!"
def no_type(self):
"""
Returns documented but without type.
Returns
-------
Some value.
"""
return "Hello world!"
def no_description(self):
"""
Provides type but no description.
Returns
-------
str
"""
return "Hello world!"
def no_punctuation(self):
"""
Provides type and description but no period.
Returns
-------
str
A nice greeting
"""
return "Hello world!"
class TestValidator(object):
def _import_path(self, klass=None, func=None):
"""
Build the required import path for tests in this module.
Parameters
----------
klass : str
Class name of object in module.
func : str
Function name of object in module.
Returns
-------
str
Import path of specified object in this module
"""
base_path = "scripts.tests.test_validate_docstrings"
if klass:
base_path = ".".join([base_path, klass])
if func:
base_path = ".".join([base_path, func])
return base_path
@capture_stderr
def test_good_class(self):
errors = validate_one(self._import_path(
klass='GoodDocStrings'))['errors']
assert isinstance(errors, list)
assert not errors
@capture_stderr
@pytest.mark.parametrize("func", [
'plot', 'sample', 'random_letters', 'sample_values', 'head', 'head1',
'contains', 'mode'])
def test_good_functions(self, func):
errors = validate_one(self._import_path(
klass='GoodDocStrings', func=func))['errors']
assert isinstance(errors, list)
assert not errors
@capture_stderr
def test_bad_class(self):
errors = validate_one(self._import_path(
klass='BadGenericDocStrings'))['errors']
assert isinstance(errors, list)
assert errors
@capture_stderr
@pytest.mark.parametrize("func", [
'func', 'astype', 'astype1', 'astype2', 'astype3', 'plot', 'method'])
def test_bad_generic_functions(self, func):
errors = validate_one(self._import_path( # noqa:F821
klass='BadGenericDocStrings', func=func))['errors']
assert isinstance(errors, list)
assert errors
@pytest.mark.parametrize("klass,func,msgs", [
# Summary tests
('BadSummaries', 'wrong_line',
('should start in the line immediately after the opening quotes',)),
('BadSummaries', 'no_punctuation',
('Summary does not end with a period',)),
('BadSummaries', 'no_capitalization',
('Summary does not start with a capital letter',)),
('BadSummaries', 'no_capitalization',
('Summary must start with infinitive verb',)),
('BadSummaries', 'multi_line',
('Summary should fit in a single line.',)),
('BadSummaries', 'two_paragraph_multi_line',
('Summary should fit in a single line.',)),
# Parameters tests
('BadParameters', 'missing_params',
('Parameters {**kwargs} not documented',)),
('BadParameters', 'bad_colon_spacing',
('Parameters {kind} not documented',
'Unknown parameters {kind: str}',
'Parameter "kind: str" has no type')),
('BadParameters', 'no_description_period',
('Parameter "kind" description should finish with "."',)),
('BadParameters', 'no_description_period_with_directive',
('Parameter "kind" description should finish with "."',)),
('BadParameters', 'parameter_capitalization',
('Parameter "kind" description should start with a capital letter',)),
pytest.param('BadParameters', 'blank_lines', ('No error yet?',),
marks=pytest.mark.xfail),
# Returns tests
('BadReturns', 'return_not_documented', ('No Returns section found',)),
('BadReturns', 'yield_not_documented', ('No Yields section found',)),
pytest.param('BadReturns', 'no_type', ('foo',),
marks=pytest.mark.xfail),
pytest.param('BadReturns', 'no_description', ('foo',),
marks=pytest.mark.xfail),
pytest.param('BadReturns', 'no_punctuation', ('foo',),
marks=pytest.mark.xfail)
])
def test_bad_examples(self, capsys, klass, func, msgs):
result = validate_one(self._import_path(klass=klass, func=func)) # noqa:F821
for msg in msgs:
assert msg in ' '.join(result['errors'])
class ApiItems(object):
@property
def api_doc(self):
return io.StringIO('''
.. currentmodule:: itertools
Itertools
---------
Infinite
~~~~~~~~
.. autosummary::
cycle
count
Finite
~~~~~~
.. autosummary::
chain
.. currentmodule:: random
Random
------
All
~~~
.. autosummary::
seed
randint
''')
@pytest.mark.parametrize('idx,name', [(0, 'itertools.cycle'),
(1, 'itertools.count'),
(2, 'itertools.chain'),
(3, 'random.seed'),
(4, 'random.randint')])
def test_item_name(self, idx, name):
result = list(validate_docstrings.get_api_items(self.api_doc))
assert result[idx][0] == name
@pytest.mark.parametrize('idx,func', [(0, 'cycle'),
(1, 'count'),
(2, 'chain'),
(3, 'seed'),
(4, 'randint')])
def test_item_function(self, idx, func):
result = list(validate_docstrings.get_api_items(self.api_doc))
assert callable(result[idx][1])
assert result[idx][1].__name__ == func
@pytest.mark.parametrize('idx,section', [(0, 'Itertools'),
(1, 'Itertools'),
(2, 'Itertools'),
(3, 'Random'),
(4, 'Random')])
def test_item_section(self, idx, section):
result = list(validate_docstrings.get_api_items(self.api_doc))
assert result[idx][2] == section
@pytest.mark.parametrize('idx,subsection', [(0, 'Infinite'),
(1, 'Infinite'),
(2, 'Finite'),
(3, 'All'),
(4, 'All')])
def test_item_subsection(self, idx, subsection):
result = list(validate_docstrings.get_api_items(self.api_doc))
assert result[idx][3] == subsection
|
bsd-3-clause
|
cgrima/icecap
|
raw.py
|
1
|
4957
|
"""
!!!!!!!!!!!!!!
! DEPRECATED !
!!!!!!!!!!!!!!
Various tools to read WAIS data
Author: Cyril Grima <[email protected]>
"""
import numpy as np
import pandas as pd
import os
import glob
import string
from scipy.interpolate import UnivariateSpline
from params import *
def grep_params(word, season = season):
"""grep a line in 'params' file for a given season
Arguments
---------
word : string
grep line that include 'word'
Keywords
--------
season : string
season name (e.g. 'ICP4')
"""
a = code_path.split('/')
fil = string.join(a[0: -5], '/') + '/pcor' + '/' + season + '/params'
out = os.popen('grep ' + word + ' ' + fil).read()
out = string.replace(out, '\n', '')
out = string.replace(out, '#', '')
out = string.replace(out, '(', '')
out = string.replace(out, ')', '')
out = string.replace(out, '{', '')
out = string.replace(out, '}', '')
out = string.replace(out, ',', '')
return out.split()
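# Illustrative sketch: grep_params above shells out to `grep` and then strips
# brackets/commas by hand.  The helper below is hypothetical (not part of the
# original module) and shows the same filtering done in pure Python for any
# params-style file path passed in.
def _demo_grep_params(word, fil):
    """Return whitespace-separated tokens of lines in `fil` containing `word`."""
    drop = '#(){},'
    tokens = []
    with open(fil) as f:
        for line in f:
            if word in line:
                for ch in drop:
                    line = line.replace(ch, '')
                tokens.extend(line.split())
    return tokens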
def list_pst(folder, pst, ext='*'):
"""list PSTs present in various folders
Arguments
---------
folder : string
folder name not including $WAIS (e.g. 'orig/xtra/ICP4/PIK/pik1.1m.RADnh3')
pst : string
pst name (e.g. 'MIS/JKB2e/Y35a')
Keywords
--------
ext : string
file extension if needed
"""
if folder.find('/RSR/') != -1:
template = string.join([wais_path, folder, ''], '/') + \
string.join(pst.split('/'), '_') + '.' + ext + '*'
else:
template = string.join([wais_path, folder, pst, ''], '/') + '*' + ext
print(template)
return glob.glob(template)
def read_ztim(fil):
"""Read time in a ztim-format file with the following structure:
Arguments
---------
fil : string
full path + file name to read
"""
out = pd.read_csv(fil, sep='\(|\)| |,', header=None)
a = np.array(out)
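    # NOTE: (a[:, 3] - a[:, 3]) below is identically zero, so only the second
    # term contributes to htim; a reference value (e.g. a[0, 3]) was probably
    # intended in the original code.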
htim = (a[:,3] - a[:,3])*24 + a[:,5]*24/(86400*1e4)
out['htim'] = htim
return out
def read_geo(pst):
"""read some geographic parameters from nrm and interpolate them
with the 1-m data ztim
Arguments
---------
pst : string
pst name (e.g. 'MIS/JKB2e/Y35a')
"""
AC = pst.split('/')[1][0:3]
foc_time = read_ztim(foc_path + '/' + pst + '/ztim_DNhH').htim
avn_time = read_ztim(norm_path + '/' + pst + '/AVN_'+AC+'a/syn_ztim').htim
las_time = read_ztim(norm_path + '/' + pst + '/LAS_'+AC+'a/syn_ztim').htim
lat_nrm = np.genfromtxt(norm_path + '/' + pst + '/AVN_'+AC+'a/lat_ang')
lon_nrm = np.genfromtxt(norm_path + '/' + pst + '/AVN_'+AC+'a/lon_ang')
roll_nrm = np.genfromtxt(norm_path + '/' + pst + '/AVN_'+AC+'a/roll_ang')
rng_nrm = np.genfromtxt(norm_path + '/' + pst + '/LAS_'+AC+'a/las_rng')
lat = np.interp(foc_time, avn_time, lat_nrm)
lon = np.interp(foc_time, avn_time, lon_nrm)
roll = np.interp(foc_time, avn_time, roll_nrm)
rng = np.interp(foc_time, las_time, rng_nrm)
#lat = UnivariateSpline(avn_time, lat_nrm)(foc_time)
#lon = UnivariateSpline(avn_time, lon_nrm)(foc_time)
#roll = UnivariateSpline(avn_time, roll_nrm)(foc_time)
#rng = UnivariateSpline(las_time, rng_nrm)(foc_time)
return pd.DataFrame({'lat':lat, 'lon':lon, 'roll':roll, 'rng':rng})
def read_pik(pst, ext, process='MagHiResInco1'):
"""Read pick files
Arguments
---------
pst : string
pst name (e.g. 'MIS/JKB2e/Y35a')
ext : string
pik file extension (e.g. 'elg_brn')
Keywords
--------
process : string
process name
Output
------
list, list
Y coordinate, value
"""
fil = pik_path + '/' + pst + '/' + process + '.' + ext
os.system('grep P ' + fil + ' > ' + code_path + '/.read_pik.tmp')
b = np.genfromtxt('.read_pik.tmp', delimiter='\t')
return b[:, 2], b[:, 3] # Y coordinate, value
def read_rsr(pst, ext, fit_model='hk', inv='spm'):
"""Read RSR files
Arguments
---------
pst : string
pst name (e.g. 'MIS/JKB2e/Y35a')
ext : string
pik file extension (e.g. 'elg_brn')
Keywords
--------
fit_model : string
statistical method used
inv : string
backscattering model used for physical properties inversion
"""
fil = rsr_path+'/'+string.join(pst.split('/'), '_') + '.' + ext + '.' + fit_model + '.' + inv + '.txt'
out = pd.read_table(fil)
return out
def read_bthm(pst, ext1, ext2):
"""Read BTHM files
Arguments
---------
pst : string
pst name (e.g. 'MIS/JKB2e/Y35a')
ext1 : string
pik file extension (e.g. 'elg_brn') for the 1st arrival
ext2 : string
pik file extension (e.g. 'elg_brn') for the 2nd arrival
"""
fil = glob.glob(rsr_path+'/'+string.join(pst.split('/'), '_') + '.' + ext1 + '-' + \
ext2 + '.bthm.txt')[0]
out = pd.read_table(fil)
return out
|
mit
|
f3r/scikit-learn
|
sklearn/cross_decomposition/pls_.py
|
34
|
30531
|
"""
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
from distutils.version import LooseVersion
from sklearn.utils.extmath import svd_flip
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
import scipy
pinv2_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
# check_finite=False is an optimization available only in scipy >=0.12
pinv2_args = {'check_finite': False}
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
# We use slower pinv2 (same as np.linalg.pinv) for stability
# reasons
X_pinv = linalg.pinv2(X, **pinv2_args)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv2(Y, **pinv2_args) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
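# Illustrative sketch: the NIPALS inner loop above is essentially a power
# iteration on X'Y, so it should recover the same leading singular vectors as
# _svd_cross_product, up to sign.  The demo below is hypothetical (not part of
# the original module); it only assumes numpy and the helper defined above.
def _demo_power_iteration_vs_svd(n=50, p=4, q=3, n_iter=1000, seed=0):
    rng = np.random.RandomState(seed)
    X, Y = rng.randn(n, p), rng.randn(n, q)
    C = np.dot(X.T, Y)
    # plain power iteration on C C' for the first left singular vector of C
    u = rng.randn(p)
    for _ in range(n_iter):
        u = np.dot(C, np.dot(C.T, u))
        u /= np.linalg.norm(u)
    v = np.dot(C.T, u)
    v /= np.linalg.norm(v)
    u_svd, v_svd = _svd_cross_product(X, Y)
    # directions agree with the SVD solution up to a global sign flip
    assert np.allclose(abs(np.dot(u, u_svd.ravel())), 1.0, atol=1e-5)
    assert np.allclose(abs(np.dot(v, v_svd.ravel())), 1.0, atol=1e-5)
    return u, v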
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
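# Illustrative sketch (hypothetical helper, not part of the original module):
# after _center_scale_xy every column has mean ~0 and unit (ddof=1) standard
# deviation, and the returned means/stds let the transformation be undone.
def _demo_center_scale_xy(seed=0):
    rng = np.random.RandomState(seed)
    X, Y = rng.randn(20, 3) * 5 + 2, rng.randn(20, 2) * 3 - 1
    Xs, Ys, x_mean, y_mean, x_std, y_std = _center_scale_xy(X.copy(), Y.copy())
    assert np.allclose(Xs.mean(axis=0), 0) and np.allclose(Ys.mean(axis=0), 0)
    assert np.allclose(Xs.std(axis=0, ddof=1), 1)
    assert np.allclose(Xs * x_std + x_mean, X)  # the scaling is invertible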
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm; the constructor's parameters
allow one to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
(i) The outer loop iterates over components.
(ii) The inner loop estimates the weight vectors. This can be done
with two algorithms: (a) the inner loop of the original NIPALS algorithm,
or (b) an SVD on the residual cross-covariance matrices.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value (True) unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contains the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (
_center_scale_xy(X, Y, self.scale))
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# Forces sign stability of x_weights and y_weights
# Sign undeterminacy issue from svd if algorithm == "svd"
# and from platform dependent computation if algorithm == 'nipals'
x_weights, y_weights = svd_flip(x_weights, y_weights.T)
y_weights = y_weights.T
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
# Possible memory footprint reduction may done here: in order to
# avoid the allocation of a data chunk for the rank-one
# approximations matrix which is then subtracted to Xk, we suggest
# to perform a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
# U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv2(np.dot(self.x_loadings_.T, self.x_weights_),
**pinv2_args))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv2(np.dot(self.y_loadings_.T, self.y_weights_),
**pinv2_args))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy, dtype=FLOAT_DTYPES)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in case of one dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm, default 1e-06.
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value (True) unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
Matrices::
T: x_scores_
U: y_scores_
W: x_weights_
C: y_weights_
P: x_loadings_
Q: y_loadings_
Are computed such that::
X = T P.T + Err and Y = U Q.T + Err
T[:, k] = Xk W[:, k] for k in range(n_components)
U[:, k] = Yk C[:, k] for k in range(n_components)
x_rotations_ = W (P.T W)^(-1)
y_rotations_ = C (Q.T C)^(-1)
where Xk and Yk are residual matrices at iteration k.
`Slides explaining PLS <http://www.eigenvector.com/Docs/Wise_pls_properties.pdf>`
For each component k, find weights u, v that optimizes:
``max corr(Xk u, Yk v) * std(Xk u) std(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
This implementation provides the same results that 3 PLS packages
provided in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
super(PLSRegression, self).__init__(
n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value (True) unless you don't care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
Matrices::
T: x_scores_
U: y_scores_
W: x_weights_
C: y_weights_
P: x_loadings_
Q: y_loadings_
Are computed such that::
X = T P.T + Err and Y = U Q.T + Err
T[:, k] = Xk W[:, k] for k in range(n_components)
U[:, k] = Yk C[:, k] for k in range(n_components)
x_rotations_ = W (P.T W)^(-1)
y_rotations_ = C (Q.T C)^(-1)
where Xk and Yk are residual matrices at iteration k.
`Slides explaining PLS <http://www.eigenvector.com/Docs/Wise_pls_properties.pdf>`
For each component k, find weights u, v that optimize::
max corr(Xk u, Yk v) * std(Xk u) std(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
regression, which is slightly different from CCA. This is mostly used
for modeling.
This implementation provides the same results that the "plspm" package
provided in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
lies in the fact that the mixOmics implementation does not exactly implement
the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
super(PLSCanonical, self).__init__(
n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
Simply perform an SVD on the cross-covariance matrix X'Y.
There is no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contains the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (
_center_scale_xy(X, Y, self.scale))
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
# let's use arpacks to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
# Deterministic output
U, V = svd_flip(U, V)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
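# Illustrative usage sketch (hypothetical, not part of the original module):
# a minimal end-to-end call of the PLSSVD class defined above, reusing the toy
# data from the PLSRegression docstring example.
def _demo_plssvd():
    X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
    Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
    plssvd = PLSSVD(n_components=2).fit(X, Y)
    x_scores, y_scores = plssvd.transform(X, Y)
    return x_scores.shape, y_scores.shape  # ((4, 2), (4, 2))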
|
bsd-3-clause
|
jefffohl/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/units.py
|
70
|
4810
|
"""
The classes here provide support for using custom classes with
matplotlib, eg those that do not expose the array interface but know
how to convert themselves to arrays. It also supports classes with
units and units conversion. Use cases include converters for custom
objects, eg a list of datetime objects, as well as for objects that
are unit aware. We don't assume any particular units implementation,
rather a units implementation must provide a ConversionInterface, and
then register with the Registry converter dictionary. For example,
here is a complete implementation which supports plotting with native
datetime objects
import matplotlib.units as units
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import datetime
class DateConverter(units.ConversionInterface):
def convert(value, unit):
'convert value to a scalar or array'
return dates.date2num(value)
convert = staticmethod(convert)
def axisinfo(unit):
'return major and minor tick locators and formatters'
if unit!='date': return None
majloc = dates.AutoDateLocator()
majfmt = dates.AutoDateFormatter(majloc)
return AxisInfo(majloc=majloc,
majfmt=majfmt,
label='date')
axisinfo = staticmethod(axisinfo)
def default_units(x):
'return the default unit for x or None'
return 'date'
default_units = staticmethod(default_units)
# finally we register our object type with a converter
units.registry[datetime.date] = DateConverter()
"""
import numpy as np
from matplotlib.cbook import iterable, is_numlike
class AxisInfo:
'information to support default axis labeling and tick labeling'
def __init__(self, majloc=None, minloc=None,
majfmt=None, minfmt=None, label=None):
"""
majloc and minloc: TickLocators for the major and minor ticks
majfmt and minfmt: TickFormatters for the major and minor ticks
label: the default axis label
If any of the above are None, the axis will simply use the default
"""
self.majloc = majloc
self.minloc = minloc
self.majfmt = majfmt
self.minfmt = minfmt
self.label = label
class ConversionInterface:
"""
The minimal interface for a converter to take custom instances (or
sequences) and convert them to values mpl can use
"""
def axisinfo(unit):
'return an units.AxisInfo instance for unit'
return None
axisinfo = staticmethod(axisinfo)
def default_units(x):
'return the default unit for x or None'
return None
default_units = staticmethod(default_units)
def convert(obj, unit):
"""
convert obj using unit. If obj is a sequence, return the
converted sequence. The output must be a sequence of scalars
that can be used by the numpy array layer
"""
return obj
convert = staticmethod(convert)
def is_numlike(x):
"""
The matplotlib datalim, autoscaling, locators etc work with
scalars which are the units converted to floats given the
current unit. The converter may be passed these floats, or
arrays of them, even when units are set. Derived conversion
interfaces may opt to pass plain-ol unitless numbers through
the conversion interface and this is a helper function for
them.
"""
if iterable(x):
for thisx in x:
return is_numlike(thisx)
else:
return is_numlike(x)
is_numlike = staticmethod(is_numlike)
class Registry(dict):
"""
register types with conversion interface
"""
def __init__(self):
dict.__init__(self)
self._cached = {}
def get_converter(self, x):
'get the converter interface instance for x, or None'
if not len(self): return None # nothing registered
#DISABLED idx = id(x)
#DISABLED cached = self._cached.get(idx)
#DISABLED if cached is not None: return cached
converter = None
classx = getattr(x, '__class__', None)
if classx is not None:
converter = self.get(classx)
if converter is None and iterable(x):
# if this is anything but an object array, we'll assume
# there are no custom units
if isinstance(x, np.ndarray) and x.dtype != np.object:
return None
for thisx in x:
converter = self.get_converter( thisx )
return converter
#DISABLED self._cached[idx] = converter
return converter
registry = Registry()
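# Illustrative sketch of the registration pattern described in the module
# docstring: a made-up `_Kelvin` wrapper type and a converter that turns it
# into plain floats.  Both classes and the demo function are hypothetical and
# not part of the original module.
class _Kelvin:
    def __init__(self, value):
        self.value = float(value)

class _KelvinConverter(ConversionInterface):
    @staticmethod
    def convert(obj, unit):
        # accept either a single _Kelvin or a sequence of them
        if iterable(obj):
            return [k.value for k in obj]
        return obj.value

    @staticmethod
    def default_units(x):
        return 'kelvin'

def _demo_register_kelvin():
    registry[_Kelvin] = _KelvinConverter()
    conv = registry.get_converter([_Kelvin(273.15), _Kelvin(300.0)])
    return conv.convert([_Kelvin(273.15), _Kelvin(300.0)], 'kelvin')  # [273.15, 300.0]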
|
gpl-3.0
|
harisbal/pandas
|
pandas/tests/indexes/multi/test_contains.py
|
2
|
3130
|
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas.compat import PYPY
import pandas as pd
from pandas import MultiIndex
import pandas.util.testing as tm
def test_contains_top_level():
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat():
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_contains(idx):
assert ('foo', 'two') in idx
assert ('bar', 'two') not in idx
assert None not in idx
@pytest.mark.skipif(not PYPY, reason="tuples cmp recursively on PyPy")
def test_isin_nan_pypy():
idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]])
tm.assert_numpy_array_equal(idx.isin([('bar', np.nan)]),
np.array([False, True]))
tm.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]),
np.array([False, True]))
def test_isin():
values = [('foo', 2), ('bar', 3), ('quux', 4)]
idx = MultiIndex.from_arrays([
['qux', 'baz', 'foo', 'bar'],
np.arange(4)
])
result = idx.isin(values)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = MultiIndex.from_arrays([[], []])
result = idx.isin(values)
assert len(result) == 0
assert result.dtype == np.bool_
@pytest.mark.skipif(PYPY, reason="tuples cmp recursively on PyPy")
def test_isin_nan_not_pypy():
idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]])
tm.assert_numpy_array_equal(idx.isin([('bar', np.nan)]),
np.array([False, False]))
tm.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]),
np.array([False, False]))
def test_isin_level_kwarg():
idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'], np.arange(
4)])
vals_0 = ['foo', 'bar', 'quux']
vals_1 = [2, 3, 10]
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=0))
tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=-2))
tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=1))
tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=-1))
pytest.raises(IndexError, idx.isin, vals_0, level=5)
pytest.raises(IndexError, idx.isin, vals_0, level=-5)
pytest.raises(KeyError, idx.isin, vals_0, level=1.0)
pytest.raises(KeyError, idx.isin, vals_1, level=-1.0)
pytest.raises(KeyError, idx.isin, vals_1, level='A')
idx.names = ['A', 'B']
tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level='A'))
tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level='B'))
pytest.raises(KeyError, idx.isin, vals_1, level='C')
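# Illustrative sketch (hypothetical helper, not part of the original tests):
# the tests above exercise two notions of membership on a MultiIndex --
# `x in midx` accepts a full key or a top-level label, while `isin` always
# compares complete tuples.
def _demo_multiindex_membership():
    midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
    assert 'A' in midx                    # top-level label
    assert ('A', 1) in midx               # full key
    assert midx.isin([('A', 1)]).tolist() == [True, False, False, False]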
|
bsd-3-clause
|
FiniteElementries/OneBus
|
Database/query_api.py
|
1
|
8957
|
import urllib
import json
import numpy as np
import pandas as pd
import multiprocessing
from multiprocessing.pool import ThreadPool
from functools import partial
import time
from yelp.client import Client
from yelp.oauth1_authenticator import Oauth1Authenticator
import config as cf
class Timer:
def __init__(self, fnname):
self.name = fnname
def __enter__(self):
self.start = time.clock()
return self
def __exit__(self, *args):
self.end = time.clock()
self.interval = self.end - self.start
print(self.name + " took " + str(self.interval) + " sec.")
def _fetch_url(url):
f = urllib.urlopen(url)
response = json.loads(f.read())
return response
def fs_loc_list(lat, lng, query):
print("using four square")
fs_secret=cf.read_api_config('fs_secret')
fs_client=cf.read_api_config('fs_client')
srchquery="https://api.foursquare.com/v2/venues/search?near=calgary,ab&query="
srchquery+=query
srchquery+="&v=20150214&m=foursquare&client_secret=" + fs_secret + "&client_id=" + fs_client
res = _fetch_url(srchquery)
#print res
loc_list = []
name = []
address = []
for i in range(len(res['response']['venues'])):
lat=res['response']['venues'][i]['location']['lat']
lng=res['response']['venues'][i]['location']['lng']
name.append(res['response']['venues'][i]['name'])
loc_list.append([lat, lng])
address.append(res['response']['venues'][i]['location']['formattedAddress'][0])
gps_array = np.array(loc_list)
name = np.array(name)
address = np.array(address)
return gps_array, name, address
def go_loc_list(lat, lng, query):
# lat = 51.135494
# lng = -114.158389
# query = 'japanese restaurant'
print("using google")
query += " Calgary AB"
loc_p = 'location'+str(lat)+','+str(lng)
qry_list = query.strip().split(' ')
qry_p = 'query=' + qry_list[0]
for i in qry_list[1:]:
qry_p += '+'
qry_p += i
rad_p = 'radius=10000'
api_key = "key=" + cf.read_api_config('google') # yyc Calgary key google places api web service
srch = 'https://maps.googleapis.com/maps/api/place/textsearch/json?'
srch += qry_p + '&'
srch += loc_p + '&'
srch += rad_p + '&'
srch += api_key
res = _fetch_url(srch)
# return res
# print(res)
loc_list = []
name = []
address = []
for loc in res['results']:
lat = loc['geometry']['location']['lat']
lng = loc['geometry']['location']['lng']
loc_list.append([lat, lng])
name.append(loc['name'])
address.append(loc['formatted_address'])
while('next_page_token' in res and len(name)<40):
page_token = "pagetoken=" + res['next_page_token']
srch = 'https://maps.googleapis.com/maps/api/place/textsearch/json?'
# srch += qry_p + '&'
srch += page_token +"&"
srch += api_key
res = _fetch_url(srch)
for loc in res['results']:
lat = loc['geometry']['location']['lat']
lng = loc['geometry']['location']['lng']
loc_list.append([lat, lng])
name.append(loc['name'])
address.append(loc['formatted_address'])
gps_array = np.array(loc_list)
name = np.array(name)
address = np.array(address)
# print name
# print address
return gps_array, name, address
def yelp_batch(lat_lng_pairs, query):
print("using yelp")
with Timer("yelp query"):
partial_yelp = partial(_yelp_batch_indivitual, query=query)
workers = multiprocessing.cpu_count()
# p=multiprocessing.Pool(workers)
p=ThreadPool(100)
result = p.map(partial_yelp, lat_lng_pairs)
p.close()
p.join()
df = pd.concat(result, ignore_index=True)
df.drop_duplicates('name', inplace = True)
print("Total no of raw results " + str(len(df)))
return df
def _yelp_batch_indivitual(lat_lng, query):
return yelp_loc_list(lat_lng[0], lat_lng[1], query)
def _yelp_rec_batch_indivitual(rec, query):
return yelp_rec_list(rec, query)
def yelp_rec_batch(rec_array, query):
print("using yelp")
with Timer("yelp query"):
partial_yelp = partial(_yelp_rec_batch_indivitual, query=query)
# workers = multiprocessing.cpu_count()
# p=multiprocessing.Pool(workers)
p=ThreadPool(100)
result = p.map(partial_yelp, rec_array)
p.close()
p.join()
df = pd.concat(result, ignore_index=True)
df.drop_duplicates('name', inplace = True)
print("Total no of raw results " + str(len(df)))
return df
def yelp_rec_list(rec, query):
"""
Return yelp results for a bounding rectangle and a search query.
:param rec: bounding box coordinates passed to search_by_bounding_box
:param query: search term
:return: dataframe object, columns=['name', 'address', 'image_url', 'yelp_url', 'review_count', 'ratings_img_url', 'lat','lon']
"""
def get_yelp(rectange):
auth = Oauth1Authenticator( consumer_key=cf.read_api_config('yelp_consumer_key'),
consumer_secret=cf.read_api_config('yelp_consumer_secret'),
token=cf.read_api_config('yelp_token'),
token_secret=cf.read_api_config('yelp_token_secret'))
client = Client(auth)
df = pd.DataFrame(columns=['name', 'address', 'image_url', 'yelp_url', 'review_count', 'ratings_img_url', 'lat','lon'])
for i in range(0, 1):
# if(len(df) < 20 and len(df) != 0):
# break
response = client.search_by_bounding_box(rectange[0], rectange[1], rectange[2], rectange[3], term=query, limit='20', sort='0') # meter)
# response = client.search_by_coordinates( lat, lng, accuracy=None, altitude=None, altitude_accuracy=None, term=query, limit='20', radius_filter=radius_filter, sort='0', offset=str(i*20)) # meter
for loc in response.businesses:
df.loc[len(df)+1]=[loc.name,
' '.join(loc.location.display_address),
loc.image_url, loc.url,
loc.review_count,
loc.rating_img_url,
loc.location.coordinate.latitude,
loc.location.coordinate.longitude]
# df.drop_duplicates('name', inplace = True)
# print("no of raw results " + str(len(df)))
return df
df = get_yelp(rec)
# if(len(df)<20):
# df = get_yelp('20000')
df[['review_count']] = df[['review_count']].astype(int)
print("no of raw results " + str(len(df)))
return df
def yelp_loc_list(lat, lng, query):
"""
Return yelp results based on user lat and lng and a search query.
:param lat: latitude of the search center
:param lng: longitude of the search center
:param query: search term
:return: dataframe object, columns=['name', 'address', 'image_url', 'yelp_url', 'review_count', 'ratings_img_url', 'lat','lon']
"""
auth = Oauth1Authenticator( consumer_key=cf.read_api_config('yelp_consumer_key'),
consumer_secret=cf.read_api_config('yelp_consumer_secret'),
token=cf.read_api_config('yelp_token'),
token_secret=cf.read_api_config('yelp_token_secret'))
client = Client(auth)
def get_yelp(radius_filter):
df = pd.DataFrame(columns=['name', 'address', 'image_url', 'yelp_url', 'review_count', 'ratings_img_url', 'lat','lon'])
for i in range(0, 2):
if(len(df) < 20 and len(df) != 0):
break
response = client.search_by_coordinates( lat, lng, accuracy=None, altitude=None, altitude_accuracy=None, term=query, limit='20', radius_filter=radius_filter, sort='0', offset=str(i*20)) # meter
for loc in response.businesses:
df.loc[len(df)+1]=[loc.name,
' '.join(loc.location.display_address),
loc.image_url, loc.url,
loc.review_count,
loc.rating_img_url,
loc.location.coordinate.latitude,
loc.location.coordinate.longitude]
# df.drop_duplicates('name', inplace = True)
# print("no of raw results " + str(len(df)))
return df
df = get_yelp('3000')
# if(len(df)<20):
# df = get_yelp('20000')
df[['review_count']] = df[['review_count']].astype(int)
# print("no of raw results " + str(len(df)))
return df
if __name__=="__main__":
lat = 51.0454027
lng = -114.05651890000001
query = "restaurant"
# print(yelp_loc_list(lat, lng, query))
print(yelp_batch([[51.0454027, -114.05652], [51.0230, -114.123]], query))
|
mit
|
hlin117/scikit-learn
|
benchmarks/bench_tree.py
|
131
|
3647
|
"""
To run this, you'll need to have the following installed:
* scikit-learn
This script runs two benchmarks.
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import matplotlib.pyplot as plt
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
plt.figure('scikit-learn tree benchmark results')
plt.subplot(211)
plt.title('Learning with varying number of samples')
plt.plot(xx, scikit_classifier_results, 'g-', label='classification')
plt.plot(xx, scikit_regressor_results, 'r-', label='regression')
plt.legend(loc='upper left')
plt.xlabel('number of samples')
plt.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
plt.subplot(212)
plt.title('Learning in high dimensional spaces')
plt.plot(xx, scikit_classifier_results, 'g-', label='classification')
plt.plot(xx, scikit_regressor_results, 'r-', label='regression')
plt.legend(loc='upper left')
plt.xlabel('number of dimensions')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
leggitta/mne-python
|
examples/visualization/plot_topo_compare_conditions.py
|
11
|
2389
|
"""
=================================================
Compare evoked responses for different conditions
=================================================
In this example, an Epochs object for visual and
auditory responses is created. Both conditions
are then accessed by their respective names to
create a sensor layout plot of the related
evoked responses.
"""
# Authors: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.io import Raw
from mne.viz import plot_evoked_topo
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = 1
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = Raw(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: MEG + STI 014 - bad channels (modify to your needs)
include = [] # or stim channels ['STI 014']
# bad channels in raw.info['bads'] will be automatically excluded
# Set up amplitude-peak rejection values for MEG channels
reject = dict(grad=4000e-13, mag=4e-12)
# pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
include=include, exclude='bads')
# Create epochs including different events
event_id = {'audio/left': 1, 'audio/right': 2,
'visual/left': 3, 'visual/right': 4}
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0), reject=reject)
# Generate list of evoked objects from conditions names
evokeds = [epochs[name].average() for name in ('left', 'right')]
###############################################################################
# Show topography for two different conditions
colors = 'yellow', 'green'
title = 'MNE sample data - left vs right (A/V combined)'
plot_evoked_topo(evokeds, color=colors, title=title)
conditions = [e.comment for e in evokeds]
for cond, col, pos in zip(conditions, colors, (0.025, 0.07)):
plt.figtext(0.99, pos, cond, color=col, fontsize=12,
horizontalalignment='right')
plt.show()
|
bsd-3-clause
|
bhargav/scikit-learn
|
sklearn/datasets/mlcomp.py
|
289
|
3855
|
# Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
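# Usage sketch (an illustrative addition, not part of the original module):
# assumes MLCOMP_DATASETS_HOME points at a folder holding an unzipped MLComp
# document-classification dataset; the dataset name used below is only an
# example and is not shipped with scikit-learn.
if __name__ == '__main__':
    news_train = load_mlcomp('20news-18828', set_='train')
    print(news_train.target_names)
    print('%d training documents' % len(news_train.filenames))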
|
bsd-3-clause
|
boomsbloom/dtm-fmri
|
DTM/for_gensim/lib/python2.7/site-packages/sklearn/utils/tests/test_linear_assignment.py
|
421
|
1349
|
# Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
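# Added illustrative test (a sketch, not from the original module): _hungarian
# returns (row, column) index pairs forming a minimum-cost assignment, so the
# summed cost must equal the optimum worked out by hand for this small matrix.
def test_hungarian_small_example():
    cost = np.array([[4, 1, 3],
                     [2, 0, 5],
                     [3, 2, 2]])
    total = sum(cost[r, c] for r, c in _hungarian(cost))
    assert total == 5  # the optimal assignment picks costs 1, 2 and 2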
|
mit
|
sniemi/SamPy
|
resolve/misc/findslits.py
|
1
|
25091
|
'''
Fits a slit image to a direct image to find the x and y positions.
Currently the script uses a slit image. In the future, however,
we probably want to use the spectrum itself because there is
no guarantee that a slit confirmation image has always been
taken.
:todo: try to do the minimization with scipy.optimize or
some other technique which might be faster and/or more
robust.
:requires: PyFITS
:requires: NumPy
:requires: matplotlib
:requires: SciPy
:author: Sami-Matias Niemi
'''
import matplotlib
#matplotlib.rc('text', usetex=True)
#matplotlib.rcParams['font.size'] = 17
import sys
from optparse import OptionParser
import pyfits as PF
import pylab as P
import numpy as np
import matplotlib.patches as patches
from matplotlib import cm
import scipy.optimize as optimize
import scipy.ndimage.interpolation as interpolation
#from SamPy
import SamPy.smnIO.write
import SamPy.smnIO.read
import SamPy.image.manipulation as m
import scipy.ndimage.filters as f
class FindSlitPosition():
def __init__(self):
pass
def _findSlitPositions(self, slitImage, threshold=1000):
'''
Finds slit positions from a slit image.
This method uses the Sobel filter in scipy.ndimage.filters
:todo: this is not ready!
        :param: slitImage: 2D slit image array
        :param: threshold: threshold applied to the Sobel-filtered image
'''
#sobel filter
filtered = f.sobel(slitImage, axis=1)
#create a mask above the threshold
msk = filtered > threshold
masked = filtered[msk]
#indices
y, x = np.indices(slitImage.shape)
yargs = y[msk]
return masked
def slitPosition(self, input, xy):
'''
Find slit positions from a confirmation image.
        The script assumes that the slits are illuminated and that
        the slit data is at least 10 per cent brighter than the
        background.
:note: modper factor is used to modify the mean background to autolocate the slits.
:param: input: input data
:param: xy: x and y minimum and maximum position to identify a single slit
:rtype: dictionary
'''
#size modifier
sizemod = 1
#modifier
modper = 1.1
#shape of the input array
shape = input.shape
#check indices, rows and columns
row, col = np.indices(shape)
#create an intial mask
#msk = input > val
mn = (np.mean(input[2000:3000, 2000:3000]))
msk = input > (mn * modper)
rm = col[msk]
cm = row[msk]
#mask the appropriate slit
msk = ((rm > xy['xmin']) & (rm < xy['xmax'])) & ((cm < xy['ymax']) & (cm > xy['ymin']))
row = rm[msk]
col = cm[msk]
#check the ends
minrow = np.min(row) + sizemod
maxrow = np.max(row) - sizemod
mincol = np.min(col) + sizemod
maxcol = np.max(col) - sizemod
#get the width and height of the slit image
xymins = (minrow, mincol)
height = maxcol - mincol
width = maxrow - minrow
out = {}
out['xy'] = xymins
out['width'] = width
out['height'] = height
out['ymin'] = mincol
out['ymax'] = maxcol
out['xmin'] = minrow
out['xmax'] = maxrow
out['values'] = input[mincol:maxcol + 1, minrow:maxrow + 1]
out['shape'] = shape
out['throughput'] = 1.0
return out
def readSlitPositions(self):
'''
Reads slit positions from a slitfile and slitdata from another file.
This file should follow DS9 format, i.e.:
box 1545 871 7 499 0
box 1512 1522 7 614 0
box 1482 2175 7 499 0
:note: slit image positions, not the slit positions on the sky!
:return: information about the slits, positions and data in slit image
:rtype: dictionary
'''
slits = []
filedata = open(self.slitPos, 'r').readlines()
for i, line in enumerate(filedata):
out = {}
tmp = line.split()
out['width'] = int(tmp[3])
out['height'] = int(tmp[4])
out['ymid'] = int(tmp[2])
out['xmid'] = int(tmp[1])
out['ymin'] = out['ymid'] - (out['height'] / 2)
out['ymax'] = out['ymid'] + (out['height'] / 2)
out['xmin'] = out['xmid'] - (out['width'] / 2) + 1
out['xmax'] = out['xmid'] + (out['width'] / 2) + 1
out['xy'] = (out['xmin'], out['ymin'])
out['shape'] = self.slitImage.shape
out['throughput'] = 1.0
out['values'] = self.slitImage[out['ymin']:out['ymax'] + 1, out['xmin']:out['xmax'] + 1].copy()
out['number'] = i
out['pixels'] = len(out['values'].ravel())
if i == 0:
out['name'] = 'low'
elif i == 2:
out['name'] = 'up'
else:
out['name'] = 'mid'
slits.append(out)
self.slits = slits
def generateSlitMask(self, slits, throughput=False):
'''
This function can be used to generate a slit mask from given slits.
'''
if len(set([x['shape'] for x in slits])) > 1:
            print 'Shapes of the slits do not match'
#slitmask
slitmask = np.zeros(slits[0]['shape'])
for slit in slits:
if throughput:
val = slit['throughput']
else:
val = 1.0
slitmask[slit['ymin']:slit['ymax'] + 1,
slit['xmin']:slit['xmax'] + 1] = val
return slitmask
def generateSkyMask(self, slits, offsetx=0, offsety=0):
'''
This function can be used to generate a slit mask on the sky
'''
skymask = np.zeros(slits[0]['shape'])
for slit in slits:
skymask[slit['ymin'] + offsety:slit['ymax'] + 1 + offsety,
slit['xmin'] + offsetx:slit['xmax'] + 1 + offsetx] = 1
return skymask
def generateSlitImages(self, output, type='.pdf'):
'''
Generates diagnostic plots from slit image.
'''
rotText = 40
#generate a separate image of the slit data of each slit image.
for i, slit in enumerate(self.slits):
fig = P.figure()
ax = fig.add_subplot(111)
#take log10 from the slit data
tmp = slit['values'] * slit['throughput']
tmp[tmp <= 0.0] = 1
tmp = np.log10(tmp)
ax.imshow(tmp,
origin='lower', interpolation=None)
#rotate x axis labels
for tl in ax.get_xticklabels():
tl.set_rotation(rotText)
P.savefig(output + str(i + 1) + type)
P.close()
#make a single slit image
fig = P.figure()
for i, slit in enumerate(self.slits):
ax = fig.add_subplot(1, len(self.slits), i + 1)
#take log10 from the slit data
tmp = slit['values'] * slit['throughput']
tmp[tmp <= 0.0] = 1
tmp = np.log10(tmp)
#take log10 from the slit data
ax.imshow(tmp,
origin='lower', interpolation=None)
#rotate x axis labels
for tl in ax.get_xticklabels():
tl.set_rotation(rotText)
#annotate
ax.annotate('slit' + str(i + 1), xy=(0.5, 1.05),
xycoords='axes fraction', ha='center', va='center')
P.savefig(output + 'All' + type)
P.close()
def overPlotSlits(self, output, type='.pdf', logscale=True):
'''
        Overplots the slits on the image data. Plots both the original slit
        positions and the best-fitted positions, and also plots the residuals.
:param: output, output file name
:param: type
:param: logscale, whether a log10 should be taken from the image data
'''
#make a copy of the imdata, in case we modify it...
img = self.img.copy()
fig = P.figure()
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
#show image
img[img < 0] = 0
if logscale:
img[img > 0] = np.log10(img[img > 0])
ax1.imshow(img, origin='lower', interpolation=None)
#original Slits
for slit in self.slits:
ax1.add_patch(patches.Rectangle(slit['xySky'],
slit['width'],
slit['height'],
fill=False))
#fitted slit positions
tmp = self.result['output'][np.argmin(self.result['output'][:, 3]), :]
rot = tmp[0]
x = tmp[1]
y = tmp[2]
for slit in self.slits:
tmp = (slit['xySky'][0] + x, slit['xySky'][1] + y)
patch = patches.Rectangle(tmp,
slit['width'],
slit['height'],
fill=False,
ec='red')
t2 = matplotlib.transforms.Affine2D().rotate_deg(rot) + ax1.transData
patch.set_transform(t2)
ax1.add_patch(patch)
#rotate x axis labels
for tl in ax1.get_xticklabels():
tl.set_rotation(40)
#rotate x axis labels
for tl in ax2.get_xticklabels():
tl.set_rotation(40)
#plot residuals
z = np.ones(img.shape)
for i, slit in enumerate(self.slits):
y1 = slit['yminSky'] + y
y2 = slit['ymaxSky'] + y + 1
x1 = slit['xmin'] + x
x2 = slit['xmax'] + x + 1
z[y1:y2, x1:x2] = slit['values'] / self.result['chiMinData'][i]
i2 = ax2.imshow(z, origin='lower', interpolation=None,
cmap=cm.get_cmap('binary'), vmin=0.795, vmax=1.205)
c2 = fig.colorbar(i2, ax=ax2, shrink=0.7, fraction=0.05)
c2.set_label('Slit Values / Direct Image Data')
#annotate
ax1.annotate('Fitted Position', xy=(0.5, 1.05),
xycoords='axes fraction', ha='center', va='center')
ax2.annotate('Residuals', xy=(0.5, 1.05),
xycoords='axes fraction', ha='center', va='center')
#save the first image
P.savefig(output + type)
#zoom-in version
ymin = np.min(np.asarray([x['yminSky'] for x in self.slits]))
ymax = np.max(np.asarray([x['ymaxSky'] for x in self.slits]))
xmin = np.min(np.asarray([x['xmin'] for x in self.slits]))
xmax = np.max(np.asarray([x['xmax'] for x in self.slits]))
ax1.set_xlim(xmin - 200, xmax + 200)
ax2.set_xlim(xmin - 200, xmax + 200)
ax1.set_ylim(ymin - 100, ymax + 100)
ax2.set_ylim(ymin - 100, ymax + 100)
P.savefig(output + 'Zoomed' + type)
P.close()
del img
def writeDS9RegionFile(self, output='skyslits.reg'):
'''
Writes a DS9 region file for all the slits.
Draws a rectangle around each slit.
'''
fh = open(output, 'w')
for slit in self.slits:
            #DS9 box format is x, y, width, height, but x and y are the centre point
string = 'box %i %i %i %i 0\n' % (slit['xySky'][0] + slit['width'] / 2,
slit['xySky'][1] + slit['height'] / 2,
slit['width'],
slit['height'])
fh.write(string)
fh.close()
def approxSkyPosition(self, lw=553, up=553, lwthr=0.9, upthr=0.9):
'''
        Generates an approximate sky position for the slits.
        Assumes that the upper and lower slits are shifted 553 pixels in the y direction.
        Such an assumption is crude, but should provide a starting point.
:note: this functions modifies the slit throughput
:todo: one should refine this after measuring the sky position accurately.
:param: lw, pixels to shift the lower slit
:param: up, pixels to shift the upper slit
:param: lwthr, 1/lwthr will be the throughput modifier for the lower slit
:param: upthr, 1/upthr will be the throughput modifier for the upper slit
'''
for i, slit in enumerate(self.slits):
if slit['name'] == 'up':
mod = - up
thr = upthr
elif slit['name'] == 'low':
mod = lw
thr = lwthr
else:
mod = 0
thr = 1.0
self.slits[i]['yminSky'] = slit['ymin'] + mod
self.slits[i]['ymaxSky'] = slit['ymax'] + mod
self.slits[i]['xySky'] = (slit['xy'][0], slit['xy'][1] + mod)
self.slits[i]['throughput'] = 1. / thr
def chiSquare(self, model, obs):
'''
Simple chi**2 calculation
'''
r = np.sum((obs - model) ** 2 / model)
return r
def _fitfunct(self, x, y, directimage, slits):
#get data from direct image
dirdata = []
for slit in slits:
d = directimage[slit['ymin'] + int(y):slit['ymax'] + 1 + int(y),
slit['xmin'] + int(x):slit['xmax'] + 1 + int(x)]
dirdata.append(d)
obs = np.hstack((dirdata[0].ravel(),
dirdata[1].ravel(),
dirdata[2].ravel()))
obs /= np.max(obs)
return obs
def _errorf(self, params, directimage, slits, data):
return self._fitfunct(params[0], params[1], directimage, slits) - data
def fitSlitsToDirectImageLSQ(self, slits, directimage, params=[-1, -1]):
'''
Uses scipy.optimize.leastsq
        :note: This does not really work at the moment...
'''
#generates a model array from the slit values, takes into account potential
#throughput of a slit
data = np.hstack((slits[0]['values'].ravel() * slits[0]['throughput'],
slits[1]['values'].ravel() * slits[1]['throughput'],
slits[2]['values'].ravel() * slits[2]['throughput']))
data /= np.max(data)
p = optimize.leastsq(self._errorf,
params,
args=(directimage, slits, data),
full_output=True,
ftol=1e-18,
xtol=1e-18)
return p
def fitSlitsToDirectImage(self,
xran=50, yran=50, step=1,
rot=1.0, rotstep=0.1, rotation=True,
normalize=False, debug=True):
'''
Fits a slit image to a direct image.
        This function does not collapse the slit image, but uses each pixel.
By default the counts are normalized to a peak count, but this can
be controlled using the optional keyword normalize.
:param: xran, +/- x-range to cover
:param: yran, +/- y-range to cover
:param: step, size of pixel steps in x and y
:param: rot, +/- rotation angle in degrees
:param: rotstep, step in degrees
:param: normalize, whether slit and direct image values should be normalized or not
:param: debug, print debugging information
:rtype: dictionary
'''
#generates a model array from the slit values, takes into account potential
#throughput of a slit
self.model = np.hstack((self.slits[0]['values'].ravel() * self.slits[0]['throughput'],
self.slits[1]['values'].ravel() * self.slits[1]['throughput'],
self.slits[2]['values'].ravel() * self.slits[2]['throughput']))
#self.msk = self.model > 0.0
#self.model = self.model[self.msk]
if normalize:
self.model /= np.max(self.model)
norm = len(self.model)
#generate rotations
if rotation:
rotations = np.arange(-rot, rot, rotstep)
rotations[(rotations < 1e-8) & (rotations > -1e-8)] = 0.0
#make a copy of the direct image
origimage = self.img.copy()
else:
rotations = [0, ]
out = []
chmin = -9.99
cm = 1e50
#loop over a range of rotations, x and y positions around the nominal position and record x, y and chisquare
for r in rotations:
if rotation:
if r != 0.0:
d = interpolation.rotate(origimage, r, reshape=False)
else:
d = origimage.copy()
else:
d = self.img.copy()
for x in range(-xran, xran, step):
for y in range(-yran, yran, step):
dirdata = []
#get data from direct image
for s in self.slits:
data = d[s['yminSky'] + y:s['ymaxSky'] + 1 + y, s['xmin'] + x:s['xmax'] + 1 + x]
dirdata.append(data)
obs = np.hstack((dirdata[0].ravel(),
dirdata[1].ravel(),
dirdata[2].ravel()))
#obs = obs[self.msk]
if normalize:
obs /= np.max(obs)
chisq = self.chiSquare(self.model, obs)
out.append([r, x, y, chisq, chisq / norm])
#save the dirdata of the minimum chisqr
if chisq < cm:
chmin = dirdata
cm = chisq
if debug:
print r, x, y, chisq, chisq / norm
#return dictionary
r = {}
r['rot'] = rot
r['rotation_step'] = rotstep
r['xran'] = xran
r['yran'] = yran
r['model'] = self.model
r['output'] = np.array(out)
r['chiMinData'] = chmin
self.result = r
def fakeSlitData(self):
'''
Cuts out imaging data to test the fitting algorithm.
'''
for slit in self.slits:
slit['values'] = self.slitImage[slit['ymin']:slit['ymax'] + 1, slit['xmin']:slit['xmax'] + 1]
def plotMinimalization(self, output='minim', type='.png'):
'''
        Generates a two-dimensional map of the minimization.
        :param: output: output file name prefix
        :param: type: output file type/extension
'''
d = self.result['output']
#begin image
P.figure()
P.scatter(d[:, 1],
d[:, 2],
c=1. / np.log10(d[:, 3]),
s=20,
cmap=cm.get_cmap('jet'),
edgecolor='none',
alpha=0.5)
P.xlim(-self.result['xran'], self.result['xran'])
P.ylim(-self.result['yran'], self.result['yran'])
P.xlabel('X [pixels]')
P.ylabel('Y [pixels]')
P.savefig(output + 'Map' + type)
P.close()
#second figure
P.figure()
P.scatter(d[:, 0], d[:, 3], s=2)
P.xlim(-self.result['rot'], self.result['rot'])
P.ylim(0.9 * np.min(d[:, 3]), 1.05 * np.max(d[:, 3]))
P.xlabel('Rotation [degrees]')
P.ylabel('$\chi^{2}$')
P.savefig(output + 'Rot' + type)
P.close()
#third figure
P.figure()
P.scatter(d[:, 1], d[:, 3], s=2)
P.xlim(-self.result['xran'], self.result['xran'])
P.ylim(0.9 * np.min(d[:, 3]), 1.05 * np.max(d[:, 3]))
P.xlabel('X [pixels]')
P.ylabel('$\chi^{2}$')
P.savefig(output + 'XCut' + type)
P.close()
#forth figure
P.figure()
P.scatter(d[:, 2], d[:, 3], s=2)
P.xlim(-self.result['yran'], self.result['yran'])
P.ylim(0.9 * np.min(d[:, 3]), 1.05 * np.max(d[:, 3]))
P.xlabel('Y [pixels]')
P.ylabel('$\chi^{2}$')
P.savefig(output + 'YCut' + type)
P.close()
def outputMinima(self, output='min.txt', stdout=True):
'''
Outputs the results to a file and also the screen if stdout = True.
'''
tmp = self.result['output'][np.argmin(self.result['output'][:, 3]), :]
str = '{0:>s}\t{1:>s}\t{2:>s}\t{3:.1f}\t{4:.0f}\t{5:.0f}\t{6:.1f}'.format(self.fitImage,
self.slit,
self.slitPos,
tmp[0],
tmp[1],
tmp[2],
tmp[3])
#to screen
if stdout:
print '\n\ndirect image slit image slit pos \t\t rot \t x \t y \t chi**2'
print str
#to file
fh = open(output, 'a')
fh.write(str + '\n')
fh.close()
def runAll(self, opts, args):
'''
Driver function
'''
if (opts.slit is None or opts.fitImage is None):
processArgs(True)
sys.exit(1)
#rename the command line options
self.slit = opts.slit
self.fitImage = opts.fitImage
#slit position file defaults to slit.reg
if (opts.position is None):
self.slitPos = 'slit.reg'
            print 'Using {0:>s} for slit positions'.format(self.slitPos)
else:
self.slitPos = opts.position
#debugging mode, where slit data are being faked
debug = opts.debug
#whether the data should be blurred or not
blur = opts.blur
#boolean to control whether the slit positions should be found automatically or not
automatic = opts.automatic
#load images
img = PF.open(self.fitImage, ignore_missing_end=True)[0].data
if img.shape[0] == 1:
img = img[0]
slitimage = PF.open(self.slit, ignore_missing_end=True)[0].data
if slitimage.shape[0] == 1:
slitimage = slitimage[0]
if blur:
img = m.blurImage(img, 4)
self.slitImage = slitimage
self.img = img
if automatic:
#gets the slit positions automatically, does not work perfectly
upslit = self.slitPosition(slitimage, {'xmin': 1460, 'xmax': 1500, 'ymin': 1900, 'ymax': 2500})
midslit = self.slitPosition(slitimage, {'xmin': 1500, 'xmax': 1525, 'ymin': 1200, 'ymax': 1850})
lowslit = self.slitPosition(slitimage, {'xmin': 1530, 'xmax': 1550, 'ymin': 600, 'ymax': 1130})
self.slits = (upslit, midslit, lowslit)
else:
self.readSlitPositions()
self.generateSlitImages('slits')
self.approxSkyPosition()
#self.approxSkyPosition(lw=553, up=553)
self.writeDS9RegionFile()
if debug:
self.fakeSlitData()
#a = fitSlitsToDirectImageLSQ(slits, img)
#print a
#import sys; sys.exit()
#find the chisqr minimum and make a diagnostic plot
#self.fitSlitsToDirectImage(xran=100, yran=100, rotation=False)
self.fitSlitsToDirectImage(xran=100, yran=100, rot=3.0, rotstep=0.1)
self.plotMinimalization()
#output some info
self.outputMinima()
#generates diagnostic plots and writes the slit positions for DS9 inspection
self.overPlotSlits('overplottedOriginalsLog')
def processArgs(just_print_help=False):
'''
Processes command line arguments
'''
parser = OptionParser()
parser.add_option("-s", "--slit", dest="slit",
help="Name of the slit image file", metavar="string")
parser.add_option("-f", "--fitting", dest="fitImage",
help='Name of the direct image to which the slit data will be fitted', metavar='string')
parser.add_option("-d", "--debug", dest="debug", action='store_true',
help='Debugging mode on')
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
help="Verbose mode on")
parser.add_option("-p", "--position", dest="position",
help="Name of the slit position file", metavar="string")
parser.add_option("-b", "--blur", action="store_true", dest="blur",
help="Whether the input direct image should be gaussian blurred or not")
parser.add_option("-a", "--automatic", action="store_true", dest="automatic",
help="If on tries to determine slit positions automatically from the slit image")
if just_print_help:
parser.print_help()
else:
return parser.parse_args()
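def _chiSquareShiftSketch(image, template, y0, x0, ran=5):
    '''
    Minimal standalone sketch (an addition for illustration, independent of the
    class above): the core of fitSlitsToDirectImage is a brute-force grid search
    that shifts a template over an image and keeps the offset with the smallest
    chi**2. The helper name and its arguments are hypothetical; the template is
    assumed to be strictly positive and to stay inside the image for all shifts.
    '''
    best = (0, 0, 1e99)
    h, w = template.shape
    for dy in range(-ran, ran + 1):
        for dx in range(-ran, ran + 1):
            #cut out the shifted region and compare it to the template
            cut = image[y0 + dy:y0 + dy + h, x0 + dx:x0 + dx + w]
            chisq = np.sum((cut - template) ** 2 / template)
            if chisq < best[2]:
                best = (dx, dy, chisq)
    return best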
if __name__ == '__main__':
#Gets the command line arguments and call main function
find = FindSlitPosition()
find.runAll(*processArgs())
|
bsd-2-clause
|
Wittlich/DAT210x-Python
|
Module6/assignment6.py
|
1
|
2852
|
import pandas as pd
import time
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
# Grab the DLA HAR dataset from:
# http://groupware.les.inf.puc-rio.br/har
# http://groupware.les.inf.puc-rio.br/static/har/dataset-har-PUC-Rio-ugulino.zip
#
# TODO: Load up the dataset into dataframe 'X'
X = pd.read_csv('Datasets/dataset-har-PUC-Rio-ugulino.csv', delimiter=';', decimal=',')
#
# TODO: Encode the gender column, 0 as male, 1 as female
X['gender'] = X['gender'].map({'Woman': 1, 'Man': 0})
#
# TODO: Clean up any column with commas in it
# so that they're properly represented as decimals instead
#
# INFO: Check data types
print(X.dtypes)
#
# TODO: Convert any column that needs to be converted into numeric
# use errors='raise'. This will alert you if something ends up being
# problematic
#
# INFO: If you find any problematic records, drop them before calling the
# to_numeric methods above...
X['z4'] = X['z4'].apply(pd.to_numeric, errors='coerce')
X.dropna(axis=0, inplace=True)
#
# TODO: Encode your 'y' value as a dummies version of your dataset's "class" column
y = pd.get_dummies(X['class'])
#
# TODO: Get rid of the user and class columns
X.drop(labels=['user', 'class'], axis=1, inplace=True)
print(X.describe())
#
# INFO: An easy way to show which rows have nans in them
print(X[pd.isnull(X).any(axis=1)])
#
# TODO: Create an RForest classifier 'model' and set n_estimators=30,
# the max_depth to 10, and oob_score=True, and random_state=0
model = RandomForestClassifier(n_estimators=30, max_depth=10, oob_score=True, random_state=0)
#
# TODO: Split your data into test / train sets
# Your test size can be 30% with random_state 7
# Use variable names: X_train, X_test, y_train, y_test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=7)
print('Fitting...')
s = time.time()
#
# TODO: train your model on your training set
model.fit(X_train, y_train)
print('Fitting completed in: %f' % (time.time() - s))
#
# INFO: Display the OOB Score of your data
score = model.oob_score_
print('OOB Score: %f' % round(score * 100, 3))
print('Scoring...')
s = time.time()
#
# TODO: score your model on your test set
score = model.score(X_test, y_test)
print('Score: %f' % round(score * 100, 3))
print('Scoring completed in: %f' % (time.time() - s))
#
# TODO: Answer the lab questions, then come back to experiment more
#
# TODO: Try playing around with the gender column
# Encode it as Male:1, Female:0
# Try encoding it to pandas dummies
# Also try dropping it. See how it affects the score
# This will be a key on how features affect your overall scoring
# and why it's important to choose good ones.
#
# TODO: After that, try messing with 'y'. Right now it's encoded with
# dummies; try other encoding methods to experiment with the effect.
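#
# A hedged sketch of the gender experiment suggested above (an addition, not
# part of the original assignment solution); the variable names are illustrative.
X_dummies = pd.get_dummies(X, columns=['gender'])
X_tr2, X_te2, y_tr2, y_te2 = train_test_split(X_dummies, y, test_size=0.3, random_state=7)
model.fit(X_tr2, y_tr2)
print('Score with gender dummies: %f' % round(model.score(X_te2, y_te2) * 100, 3))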
|
mit
|
PrashntS/scikit-learn
|
sklearn/linear_model/ransac.py
|
191
|
14261
|
# coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <RansacRegression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
relative number `ceil(min_samples * X.shape[0]`) for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
        Stop iteration if the score is greater than or equal to this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
data is sampled in RANSAC. This requires to generate at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
def fit(self, X, y):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is None:
residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
else:
residual_metric = self.residual_metric
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
base_estimator.fit(X_subset, y_subset)
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = residual_metric(diff)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# less inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find valid consensus set, because"
" either the `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly ""chosen sub-samples. Consider "
"relaxing the ""constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
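# Usage sketch (an illustrative addition, not part of the module): fit a robust
# line to synthetic data containing gross outliers and inspect the inlier mask.
# The data and parameters below are assumptions chosen only for demonstration.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.uniform(0, 10, size=(100, 1))
    y_demo = 3.0 * X_demo.ravel() + rng.normal(size=100)
    y_demo[::10] += 50.  # inject gross outliers
    ransac = RANSACRegressor(random_state=0)
    ransac.fit(X_demo, y_demo)
    print("slope: %.2f, inliers: %d"
          % (ransac.estimator_.coef_[0], ransac.inlier_mask_.sum()))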
|
bsd-3-clause
|
tp81/openmicroscopy
|
components/tools/OmeroPy/test/unit/test_jvmcfg.py
|
2
|
7569
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2015 Glencoe Software, Inc. All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test of the automatic JVM setting logic for OMERO startup.
"""
import pytest
from omero.config import ConfigXml, xml
from omero.install.jvmcfg import adjust_settings
from omero.install.jvmcfg import ManualStrategy
from omero.install.jvmcfg import PercentStrategy
from omero.install.jvmcfg import Settings
from omero.install.jvmcfg import Strategy
from omero.install.jvmcfg import strip_dict
from omero.install.jvmcfg import usage_charts
from omero.util.temp_files import create_path
from path import path
from xml.etree.ElementTree import SubElement
from xml.etree.ElementTree import tostring
from xml.etree.ElementTree import XML
from test.unit.test_config import initial
def write_config(data):
p = create_path()
i = initial()
for k, v in data.items():
for x in i[0:2]: # __ACTIVE__ & default
SubElement(x, "property", name=k, value=v)
string = tostring(i, 'utf-8')
txt = xml.dom.minidom.parseString(string).toprettyxml(" ", "\n", None)
p.write_text(txt)
return p
class TestMemoryStrip(object):
def test_1(self):
rv = strip_dict({"a.b": "c"}, prefix="a")
assert {"b": "c"} == rv
def test_2(self):
rv = strip_dict({"a.b.c": "d"}, prefix="a.b")
assert rv["c"] == "d"
def test_3(self):
rv = strip_dict({
"omero.jvmcfg.foo": "a",
"something.else": "b"})
assert rv["foo"] == "a"
assert "something.else" not in rv
@pytest.mark.parametrize("input,output", (
({"omero.jvmcfg.heap_size.blitz": "1g"}, {"heap_size": "1g"}),
))
def test_4(self, input, output):
p = write_config(input)
config = ConfigXml(filename=str(p), env_config="default")
try:
m = config.as_map()
s = strip_dict(m, suffix="blitz")
assert s == output
finally:
config.close()
def test_5(self):
rv = strip_dict({
"omero.jvmcfg.a.blitz": "b",
}, suffix="blitz")
assert rv["a"] == "b"
class TestSettings(object):
def test_initial(self):
s = Settings()
assert s.perm_gen == "128m"
assert s.heap_dump == "off"
assert s.heap_size == "512m"
def test_explicit(self):
s = Settings({
"perm_gen": "xxx",
"heap_dump": "yyy",
"heap_size": "zzz",
})
assert s.perm_gen == "xxx"
assert s.heap_dump == "yyy"
assert s.heap_size == "zzz"
def test_defaults(self):
s = Settings({}, {
"perm_gen": "xxx",
"heap_dump": "yyy",
"heap_size": "zzz",
})
assert s.perm_gen == "xxx"
assert s.heap_dump == "yyy"
assert s.heap_size == "zzz"
def test_both(self):
s = Settings({
"perm_gen": "aaa",
"heap_dump": "bbb",
"heap_size": "ccc",
}, {
"perm_gen": "xxx",
"heap_dump": "yyy",
"heap_size": "zzz",
})
assert s.perm_gen == "aaa"
assert s.heap_dump == "bbb"
assert s.heap_size == "ccc"
class TestStrategy(object):
def test_no_instantiate(self):
with pytest.raises(Exception):
Strategy("blitz")
def test_hard_coded(self):
strategy = ManualStrategy("blitz")
settings = strategy.get_memory_settings()
assert settings == [
"-Xmx512m",
"-XX:MaxPermSize=128m",
"-XX:+IgnoreUnrecognizedVMOptions",
]
def test_percent_usage(self):
strategy = PercentStrategy("blitz")
table = list(strategy.usage_table(15, 16))[0]
assert table[0] == 2**15
assert table[1] == 2**15*15/100
def test_heap_dump_on(self):
settings = Settings({"heap_dump": "on"})
strategy = PercentStrategy("blitz", settings)
hd = strategy.get_heap_dump()
append = strategy.get_append()
assert " " not in hd
assert "HeapDumpPath" not in hd
assert not append
def test_heap_dump_tmp(self):
settings = Settings({"heap_dump": "tmp"})
strategy = PercentStrategy("blitz", settings)
hd = strategy.get_heap_dump()
append = strategy.get_append()
assert " " not in hd
assert "HeapDumpPath" not in hd
assert "HeapDumpPath" in "".join(append)
class AdjustFixture(object):
def __init__(self, input, output, name, **kwargs):
self.input = input
self.output = output
self.name = name
self.kwargs = kwargs
def validate(self, rv):
for k, v in self.output.items():
assert k in rv
found = rv[k]
found.pop(0) # settings
assert v == found, "%s.%s: %s <> %s" % (self.name, k,
v, found)
import json
f = open(__file__[:-3] + ".json", "r")
data = json.load(f)
AFS = []
for x in data:
AFS.append(AdjustFixture(x["input"], x["output"], x["name"]))
def template_xml():
templates = path(__file__) / ".." / ".." / ".."
templates = templates / ".." / ".." / ".."
templates = templates / "etc" / "grid" / "templates.xml"
templates = templates.abspath()
return XML(templates.text())
class TestAdjustStrategy(object):
@pytest.mark.parametrize("fixture", AFS, ids=[x.name for x in AFS])
def test_adjust(self, fixture, monkeypatch):
monkeypatch.setattr(Strategy, '_system_memory_mb_java',
lambda x: (2000, 4000))
p = write_config(fixture.input)
xml = template_xml()
config = ConfigXml(filename=str(p), env_config="default")
try:
rv = adjust_settings(config, xml, **fixture.kwargs)
fixture.validate(rv)
finally:
config.close()
@pytest.mark.parametrize("fixture", AFS, ids=[x.name for x in AFS])
def test_12527(self, fixture, monkeypatch):
monkeypatch.setattr(Strategy, '_system_memory_mb_java',
lambda x: (2000, 4000))
p = write_config(fixture.input)
old_templates = path(__file__).dirname() / "old_templates.xml"
xml = XML(old_templates.abspath().text())
config = ConfigXml(filename=str(p), env_config="default")
with pytest.raises(Exception):
adjust_settings(config, xml, **fixture.kwargs)
class TestChart(object):
def test_percent_chart(self):
try:
usage_charts("target/charts.png")
except ImportError:
# Requires matplotlib, etc
pass
|
gpl-2.0
|
bhargav/scikit-learn
|
examples/cluster/plot_segmentation_toy.py
|
91
|
3522
|
"""
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (ie balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
# We use a mask that limits to the foreground: the problem that we are
# interested in here is not separating the objects from the background,
# but separating them one from the other.
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we make it only weakly
# dependent on the gradient, so the segmentation is close to a Voronoi
# partition.
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
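###############################################################################
# Optional variant (an illustrative addition, not part of the original
# example): the sharpness of the similarity can be tuned with an explicit beta,
# plus a small eps that keeps the graph numerically well connected. The values
# of beta and eps below are assumptions chosen only for demonstration.
beta, eps = 5, 1e-6
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(label_im)
plt.show()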
|
bsd-3-clause
|
CellModels/tyssue
|
tests/geometry/test_planar_geometry.py
|
2
|
2016
|
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
from tyssue import config
from tyssue.core import Epithelium
from tyssue.generation import (
three_faces_sheet,
extrude,
hexa_grid3d,
hexa_grid2d,
subdivide_faces,
)
from tyssue.geometry.planar_geometry import PlanarGeometry
from numpy import pi
def test_face_projected_pos():
datasets = {}
tri_verts = [[0, 0], [1, 0], [-0.5, 0.75], [-0.5, -0.75]]
tri_edges = [
[0, 1, 0],
[1, 2, 0],
[2, 0, 0],
[0, 3, 1],
[3, 1, 1],
[1, 0, 1],
[0, 2, 2],
[2, 3, 2],
[3, 0, 2],
]
datasets["edge"] = pd.DataFrame(
data=np.array(tri_edges), columns=["srce", "trgt", "face"]
)
datasets["edge"].index.name = "edge"
datasets["face"] = pd.DataFrame(data=np.zeros((3, 2)), columns=["x", "y"])
datasets["face"].index.name = "face"
datasets["vert"] = pd.DataFrame(data=np.array(tri_verts), columns=["x", "y"])
datasets["vert"].index.name = "vert"
specs = config.geometry.planar_spec()
eptm = Epithelium("extra", datasets, specs, coords=["x", "y"])
PlanarGeometry.update_all(eptm)
res_rot_pos_pi2 = PlanarGeometry.face_projected_pos(eptm, 0, pi / 2.0)
res_rot_pos_face1_2pi = PlanarGeometry.face_projected_pos(eptm, 1, 2.0 * pi)
expected_rot_pos_pi2 = pd.DataFrame.from_dict(
{
"vert": [0, 1, 2, 3],
"x": [0.25, 0.25, -0.5, 1.0],
"y": [-0.166667, 0.8333333, -0.666667, -0.666667],
}
).set_index("vert")
expected_rot_pos_face1_2pi = pd.DataFrame.from_dict(
{
"vert": [0, 1, 2, 3],
"x": [-0.166667, 0.833333, -0.666667, -0.666667],
"y": [0.25, 0.25, 1.00, -0.5],
}
).set_index("vert")
tolerance = 1e-16
assert all((expected_rot_pos_pi2 - res_rot_pos_pi2) ** 2 < tolerance)
assert all((expected_rot_pos_face1_2pi - res_rot_pos_face1_2pi) ** 2 < tolerance)
|
gpl-2.0
|
cbmoore/statsmodels
|
statsmodels/stats/tests/test_power.py
|
28
|
25876
|
# -*- coding: utf-8 -*-
# pylint: disable=W0231, W0142
"""Tests for statistical power calculations
Note:
tests for chisquare power are in test_gof.py
Created on Sat Mar 09 08:44:49 2013
Author: Josef Perktold
"""
import copy
import numpy as np
from numpy.testing import (assert_almost_equal, assert_allclose, assert_raises,
assert_equal, assert_warns)
import statsmodels.stats.power as smp
import warnings
#from .test_weightstats import CheckPowerMixin
from statsmodels.stats.tests.test_weightstats import Holder
# for testing plots
import nose
from numpy.testing import dec
try:
import matplotlib.pyplot as plt #makes plt available for test functions
have_matplotlib = True
except ImportError:
have_matplotlib = False
class CheckPowerMixin(object):
def test_power(self):
#test against R results
kwds = copy.copy(self.kwds)
del kwds['power']
kwds.update(self.kwds_extra)
if hasattr(self, 'decimal'):
decimal = self.decimal
else:
decimal = 6
res1 = self.cls()
assert_almost_equal(res1.power(**kwds), self.res2.power, decimal=decimal)
def test_positional(self):
res1 = self.cls()
kwds = copy.copy(self.kwds)
del kwds['power']
kwds.update(self.kwds_extra)
# positional args
if hasattr(self, 'args_names'):
args_names = self.args_names
else:
nobs_ = 'nobs' if 'nobs' in kwds else 'nobs1'
args_names = ['effect_size', nobs_, 'alpha']
# pop positional args
args = [kwds.pop(arg) for arg in args_names]
if hasattr(self, 'decimal'):
decimal = self.decimal
else:
decimal = 6
res = res1.power(*args, **kwds)
assert_almost_equal(res, self.res2.power, decimal=decimal)
def test_roots(self):
kwds = copy.copy(self.kwds)
kwds.update(self.kwds_extra)
# kwds_extra are used as argument, but not as target for root
for key in self.kwds:
# keep print to check whether tests are really executed
#print 'testing roots', key
value = kwds[key]
kwds[key] = None
result = self.cls().solve_power(**kwds)
assert_allclose(result, value, rtol=0.001, err_msg=key+' failed')
# yield can be used to investigate specific errors
#yield assert_allclose, result, value, 0.001, 0, key+' failed'
kwds[key] = value # reset dict
@dec.skipif(not have_matplotlib)
def test_power_plot(self):
if self.cls == smp.FTestPower:
raise nose.SkipTest('skip FTestPower plot_power')
fig = plt.figure()
ax = fig.add_subplot(2,1,1)
fig = self.cls().plot_power(dep_var='nobs',
nobs= np.arange(2, 100),
effect_size=np.array([0.1, 0.2, 0.3, 0.5, 1]),
#alternative='larger',
ax=ax, title='Power of t-Test',
**self.kwds_extra)
ax = fig.add_subplot(2,1,2)
fig = self.cls().plot_power(dep_var='es',
nobs=np.array([10, 20, 30, 50, 70, 100]),
effect_size=np.linspace(0.01, 2, 51),
#alternative='larger',
ax=ax, title='',
**self.kwds_extra)
plt.close('all')
#''' test cases
#one sample
#                 two-sided    one-sided
#large power      OneS1        OneS3
#small power      OneS2        OneS4
#
#two sample
#                 two-sided    one-sided
#large power      TwoS1        TwoS3
#small power      TwoS2        TwoS4
#small p, ratio   TwoS4        TwoS5
#'''
class TestTTPowerOneS1(CheckPowerMixin):
def __init__(self):
#> p = pwr.t.test(d=1,n=30,sig.level=0.05,type="two.sample",alternative="two.sided")
#> cat_items(p, prefix='tt_power2_1.')
res2 = Holder()
res2.n = 30
res2.d = 1
res2.sig_level = 0.05
res2.power = 0.9995636009612725
res2.alternative = 'two.sided'
res2.note = 'NULL'
res2.method = 'One-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
'alpha': res2.sig_level, 'power':res2.power}
self.kwds_extra = {}
self.cls = smp.TTestPower
class TestTTPowerOneS2(CheckPowerMixin):
# case with small power
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=0.2,n=20,sig.level=0.05,type="one.sample",alternative="two.sided")
#> cat_items(p, "res2.")
res2.n = 20
res2.d = 0.2
res2.sig_level = 0.05
res2.power = 0.1359562887679666
res2.alternative = 'two.sided'
res2.note = '''NULL'''
res2.method = 'One-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
'alpha': res2.sig_level, 'power':res2.power}
self.kwds_extra = {}
self.cls = smp.TTestPower
class TestTTPowerOneS3(CheckPowerMixin):
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=1,n=30,sig.level=0.05,type="one.sample",alternative="greater")
#> cat_items(p, prefix='tt_power1_1g.')
res2.n = 30
res2.d = 1
res2.sig_level = 0.05
res2.power = 0.999892010204909
res2.alternative = 'greater'
res2.note = 'NULL'
res2.method = 'One-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
'alpha': res2.sig_level, 'power': res2.power}
self.kwds_extra = {'alternative': 'larger'}
self.cls = smp.TTestPower
class TestTTPowerOneS4(CheckPowerMixin):
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=0.05,n=20,sig.level=0.05,type="one.sample",alternative="greater")
#> cat_items(p, "res2.")
res2.n = 20
res2.d = 0.05
res2.sig_level = 0.05
res2.power = 0.0764888785042198
res2.alternative = 'greater'
res2.note = '''NULL'''
res2.method = 'One-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
'alpha': res2.sig_level, 'power': res2.power}
self.kwds_extra = {'alternative': 'larger'}
self.cls = smp.TTestPower
class TestTTPowerOneS5(CheckPowerMixin):
# case one-sided less, not implemented yet
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=0.2,n=20,sig.level=0.05,type="one.sample",alternative="less")
#> cat_items(p, "res2.")
res2.n = 20
res2.d = 0.2
res2.sig_level = 0.05
res2.power = 0.006063932667926375
res2.alternative = 'less'
res2.note = '''NULL'''
res2.method = 'One-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
'alpha': res2.sig_level, 'power': res2.power}
self.kwds_extra = {'alternative': 'smaller'}
self.cls = smp.TTestPower
class TestTTPowerOneS6(CheckPowerMixin):
# case one-sided less, negative effect size, not implemented yet
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=-0.2,n=20,sig.level=0.05,type="one.sample",alternative="less")
#> cat_items(p, "res2.")
res2.n = 20
res2.d = -0.2
res2.sig_level = 0.05
res2.power = 0.21707518167191
res2.alternative = 'less'
res2.note = '''NULL'''
res2.method = 'One-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
'alpha': res2.sig_level, 'power': res2.power}
self.kwds_extra = {'alternative': 'smaller'}
self.cls = smp.TTestPower
class TestTTPowerTwoS1(CheckPowerMixin):
def __init__(self):
#> p = pwr.t.test(d=1,n=30,sig.level=0.05,type="two.sample",alternative="two.sided")
#> cat_items(p, prefix='tt_power2_1.')
res2 = Holder()
res2.n = 30
res2.d = 1
res2.sig_level = 0.05
res2.power = 0.967708258242517
res2.alternative = 'two.sided'
res2.note = 'n is number in *each* group'
res2.method = 'Two-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power': res2.power, 'ratio': 1}
self.kwds_extra = {}
self.cls = smp.TTestIndPower
class TestTTPowerTwoS2(CheckPowerMixin):
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=0.1,n=20,sig.level=0.05,type="two.sample",alternative="two.sided")
#> cat_items(p, "res2.")
res2.n = 20
res2.d = 0.1
res2.sig_level = 0.05
res2.power = 0.06095912465411235
res2.alternative = 'two.sided'
res2.note = 'n is number in *each* group'
res2.method = 'Two-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power': res2.power, 'ratio': 1}
self.kwds_extra = {}
self.cls = smp.TTestIndPower
class TestTTPowerTwoS3(CheckPowerMixin):
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=1,n=30,sig.level=0.05,type="two.sample",alternative="greater")
#> cat_items(p, prefix='tt_power2_1g.')
res2.n = 30
res2.d = 1
res2.sig_level = 0.05
res2.power = 0.985459690251624
res2.alternative = 'greater'
res2.note = 'n is number in *each* group'
res2.method = 'Two-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power':res2.power, 'ratio': 1}
self.kwds_extra = {'alternative': 'larger'}
self.cls = smp.TTestIndPower
class TestTTPowerTwoS4(CheckPowerMixin):
# case with small power
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=0.01,n=30,sig.level=0.05,type="two.sample",alternative="greater")
#> cat_items(p, "res2.")
res2.n = 30
res2.d = 0.01
res2.sig_level = 0.05
res2.power = 0.0540740302835667
res2.alternative = 'greater'
res2.note = 'n is number in *each* group'
res2.method = 'Two-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power':res2.power}
self.kwds_extra = {'alternative': 'larger'}
self.cls = smp.TTestIndPower
class TestTTPowerTwoS5(CheckPowerMixin):
# case with unequal n, ratio>1
def __init__(self):
res2 = Holder()
#> p = pwr.t2n.test(d=0.1,n1=20, n2=30,sig.level=0.05,alternative="two.sided")
#> cat_items(p, "res2.")
res2.n1 = 20
res2.n2 = 30
res2.d = 0.1
res2.sig_level = 0.05
res2.power = 0.0633081832564667
res2.alternative = 'two.sided'
res2.method = 't test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n1,
'alpha': res2.sig_level, 'power':res2.power, 'ratio': 1.5}
self.kwds_extra = {'alternative': 'two-sided'}
self.cls = smp.TTestIndPower
class TestTTPowerTwoS6(CheckPowerMixin):
# case with unequal n, ratio>1
def __init__(self):
res2 = Holder()
#> p = pwr.t2n.test(d=0.1,n1=20, n2=30,sig.level=0.05,alternative="greater")
#> cat_items(p, "res2.")
res2.n1 = 20
res2.n2 = 30
res2.d = 0.1
res2.sig_level = 0.05
res2.power = 0.09623589080917805
res2.alternative = 'greater'
res2.method = 't test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n1,
'alpha': res2.sig_level, 'power':res2.power, 'ratio': 1.5}
self.kwds_extra = {'alternative': 'larger'}
self.cls = smp.TTestIndPower
def test_normal_power_explicit():
# a few initial test cases for NormalIndPower
sigma = 1
d = 0.3
nobs = 80
alpha = 0.05
res1 = smp.normal_power(d, nobs/2., 0.05)
res2 = smp.NormalIndPower().power(d, nobs, 0.05)
res3 = smp.NormalIndPower().solve_power(effect_size=0.3, nobs1=80, alpha=0.05, power=None)
res_R = 0.475100870572638
assert_almost_equal(res1, res_R, decimal=13)
assert_almost_equal(res2, res_R, decimal=13)
assert_almost_equal(res3, res_R, decimal=13)
norm_pow = smp.normal_power(-0.01, nobs/2., 0.05)
norm_pow_R = 0.05045832927039234
#value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="two.sided")
assert_almost_equal(norm_pow, norm_pow_R, decimal=11)
norm_pow = smp.NormalIndPower().power(0.01, nobs, 0.05,
alternative="larger")
norm_pow_R = 0.056869534873146124
#value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="greater")
assert_almost_equal(norm_pow, norm_pow_R, decimal=11)
# Note: negative effect size is same as switching one-sided alternative
# TODO: should I switch to larger/smaller instead of "one-sided" options
norm_pow = smp.NormalIndPower().power(-0.01, nobs, 0.05,
alternative="larger")
norm_pow_R = 0.0438089705093578
#value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="less")
assert_almost_equal(norm_pow, norm_pow_R, decimal=11)
class TestNormalIndPower1(CheckPowerMixin):
def __init__(self):
#> example from above
# results copied not directly from R
res2 = Holder()
res2.n = 80
res2.d = 0.3
res2.sig_level = 0.05
res2.power = 0.475100870572638
res2.alternative = 'two.sided'
res2.note = 'NULL'
res2.method = 'two sample power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power':res2.power, 'ratio': 1}
self.kwds_extra = {}
self.cls = smp.NormalIndPower
class TestNormalIndPower2(CheckPowerMixin):
def __init__(self):
res2 = Holder()
#> np = pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="less")
#> cat_items(np, "res2.")
res2.h = 0.01
res2.n = 80
res2.sig_level = 0.05
res2.power = 0.0438089705093578
res2.alternative = 'less'
res2.method = ('Difference of proportion power calculation for' +
' binomial distribution (arcsine transformation)')
res2.note = 'same sample sizes'
self.res2 = res2
self.kwds = {'effect_size': res2.h, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power':res2.power, 'ratio': 1}
self.kwds_extra = {'alternative':'smaller'}
self.cls = smp.NormalIndPower
class TestNormalIndPower_onesamp1(CheckPowerMixin):
def __init__(self):
# forcing one-sample by using ratio=0
#> example from above
# results copied not directly from R
res2 = Holder()
res2.n = 40
res2.d = 0.3
res2.sig_level = 0.05
res2.power = 0.475100870572638
res2.alternative = 'two.sided'
res2.note = 'NULL'
res2.method = 'two sample power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power':res2.power}
# keyword for which we don't look for root:
self.kwds_extra = {'ratio': 0}
self.cls = smp.NormalIndPower
class TestNormalIndPower_onesamp2(CheckPowerMixin):
# Note: same power as two sample case with twice as many observations
def __init__(self):
# forcing one-sample by using ratio=0
res2 = Holder()
#> np = pwr.norm.test(d=0.01,n=40,sig.level=0.05,alternative="less")
#> cat_items(np, "res2.")
res2.d = 0.01
res2.n = 40
res2.sig_level = 0.05
res2.power = 0.0438089705093578
res2.alternative = 'less'
res2.method = 'Mean power calculation for normal distribution with known variance'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power':res2.power}
# keyword for which we don't look for root:
self.kwds_extra = {'ratio': 0, 'alternative':'smaller'}
self.cls = smp.NormalIndPower
class TestChisquarePower(CheckPowerMixin):
def __init__(self):
# one example from test_gof, results_power
res2 = Holder()
res2.w = 0.1
res2.N = 5
res2.df = 4
res2.sig_level = 0.05
res2.power = 0.05246644635810126
res2.method = 'Chi squared power calculation'
res2.note = 'N is the number of observations'
self.res2 = res2
self.kwds = {'effect_size': res2.w, 'nobs': res2.N,
'alpha': res2.sig_level, 'power':res2.power}
# keyword for which we don't look for root:
# solving for n_bins doesn't work, will not be used in regular usage
self.kwds_extra = {'n_bins': res2.df + 1}
self.cls = smp.GofChisquarePower
def _test_positional(self):
res1 = self.cls()
args_names = ['effect_size','nobs', 'alpha', 'n_bins']
kwds = copy.copy(self.kwds)
del kwds['power']
kwds.update(self.kwds_extra)
args = [kwds[arg] for arg in args_names]
if hasattr(self, 'decimal'):
decimal = self.decimal #pylint: disable-msg=E1101
else:
decimal = 6
assert_almost_equal(res1.power(*args), self.res2.power, decimal=decimal)
def test_ftest_power():
#equivalence ftest, ttest
for alpha in [0.01, 0.05, 0.1, 0.20, 0.50]:
res0 = smp.ttest_power(0.01, 200, alpha)
res1 = smp.ftest_power(0.01, 199, 1, alpha=alpha, ncc=0)
assert_almost_equal(res1, res0, decimal=6)
    #example from G*Power documentation, F-test ANOVA
#Total sample size:200
#Effect size "f":0.25
#Beta/alpha ratio:1
#Result:
#Alpha:0.1592
#Power (1-beta):0.8408
#Critical F:1.4762
#Lambda: 12.50000
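    # Editorial worked check of the G*Power numbers quoted above: for a fixed-effects
    # ANOVA the noncentrality parameter is usually lambda = f**2 * N, and
    # 0.25**2 * 200 = 12.5, which matches "Lambda: 12.50000".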
res1 = smp.ftest_anova_power(0.25, 200, 0.1592, k_groups=10)
res0 = 0.8408
assert_almost_equal(res1, res0, decimal=4)
# TODO: no class yet
    # examples against R::pwr
res2 = Holder()
#> rf = pwr.f2.test(u=5, v=199, f2=0.1**2, sig.level=0.01)
#> cat_items(rf, "res2.")
res2.u = 5
res2.v = 199
res2.f2 = 0.01
res2.sig_level = 0.01
res2.power = 0.0494137732920332
res2.method = 'Multiple regression power calculation'
res1 = smp.ftest_power(np.sqrt(res2.f2), res2.v, res2.u,
alpha=res2.sig_level, ncc=1)
assert_almost_equal(res1, res2.power, decimal=5)
res2 = Holder()
#> rf = pwr.f2.test(u=5, v=199, f2=0.3**2, sig.level=0.01)
#> cat_items(rf, "res2.")
res2.u = 5
res2.v = 199
res2.f2 = 0.09
res2.sig_level = 0.01
res2.power = 0.7967191006290872
res2.method = 'Multiple regression power calculation'
res1 = smp.ftest_power(np.sqrt(res2.f2), res2.v, res2.u,
alpha=res2.sig_level, ncc=1)
assert_almost_equal(res1, res2.power, decimal=5)
res2 = Holder()
#> rf = pwr.f2.test(u=5, v=19, f2=0.3**2, sig.level=0.1)
#> cat_items(rf, "res2.")
res2.u = 5
res2.v = 19
res2.f2 = 0.09
res2.sig_level = 0.1
res2.power = 0.235454222377575
res2.method = 'Multiple regression power calculation'
res1 = smp.ftest_power(np.sqrt(res2.f2), res2.v, res2.u,
alpha=res2.sig_level, ncc=1)
assert_almost_equal(res1, res2.power, decimal=5)
# class based version of two above test for Ftest
class TestFtestAnovaPower(CheckPowerMixin):
def __init__(self):
res2 = Holder()
        #example from G*Power documentation, F-test ANOVA
#Total sample size:200
#Effect size "f":0.25
#Beta/alpha ratio:1
#Result:
#Alpha:0.1592
#Power (1-beta):0.8408
#Critical F:1.4762
#Lambda: 12.50000
#converted to res2 by hand
res2.f = 0.25
res2.n = 200
res2.k = 10
res2.alpha = 0.1592
res2.power = 0.8408
res2.method = 'Multiple regression power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.f, 'nobs': res2.n,
'alpha': res2.alpha, 'power': res2.power}
# keyword for which we don't look for root:
        # solving for k_groups doesn't work, will not be used in regular usage
self.kwds_extra = {'k_groups': res2.k} # rootfinding doesn't work
#self.args_names = ['effect_size','nobs', 'alpha']#, 'k_groups']
self.cls = smp.FTestAnovaPower
# precision for test_power
self.decimal = 4
class TestFtestPower(CheckPowerMixin):
def __init__(self):
res2 = Holder()
#> rf = pwr.f2.test(u=5, v=19, f2=0.3**2, sig.level=0.1)
#> cat_items(rf, "res2.")
res2.u = 5
res2.v = 19
res2.f2 = 0.09
res2.sig_level = 0.1
res2.power = 0.235454222377575
res2.method = 'Multiple regression power calculation'
self.res2 = res2
self.kwds = {'effect_size': np.sqrt(res2.f2), 'df_num': res2.v,
'df_denom': res2.u, 'alpha': res2.sig_level,
'power': res2.power}
        # no extra keywords to hold fixed in this case
self.kwds_extra = {}
self.args_names = ['effect_size', 'df_num', 'df_denom', 'alpha']
self.cls = smp.FTestPower
# precision for test_power
self.decimal = 5
def test_power_solver():
# messing up the solver to trigger backup
nip = smp.NormalIndPower()
# check result
es0 = 0.1
pow_ = nip.solve_power(es0, nobs1=1600, alpha=0.01, power=None, ratio=1,
alternative='larger')
# value is regression test
assert_almost_equal(pow_, 0.69219411243824214, decimal=5)
es = nip.solve_power(None, nobs1=1600, alpha=0.01, power=pow_, ratio=1,
alternative='larger')
assert_almost_equal(es, es0, decimal=4)
assert_equal(nip.cache_fit_res[0], 1)
assert_equal(len(nip.cache_fit_res), 2)
# cause first optimizer to fail
nip.start_bqexp['effect_size'] = {'upp': -10, 'low': -20}
nip.start_ttp['effect_size'] = 0.14
es = nip.solve_power(None, nobs1=1600, alpha=0.01, power=pow_, ratio=1,
alternative='larger')
assert_almost_equal(es, es0, decimal=4)
assert_equal(nip.cache_fit_res[0], 1)
assert_equal(len(nip.cache_fit_res), 3, err_msg=repr(nip.cache_fit_res))
nip.start_ttp['effect_size'] = np.nan
es = nip.solve_power(None, nobs1=1600, alpha=0.01, power=pow_, ratio=1,
alternative='larger')
assert_almost_equal(es, es0, decimal=4)
assert_equal(nip.cache_fit_res[0], 1)
assert_equal(len(nip.cache_fit_res), 4)
# I let this case fail, could be fixed for some statistical tests
# (we shouldn't get here in the first place)
# effect size is negative, but last stage brentq uses [1e-8, 1-1e-8]
assert_raises(ValueError, nip.solve_power, None, nobs1=1600, alpha=0.01,
power=0.005, ratio=1, alternative='larger')
def test_power_solver_warn():
# messing up the solver to trigger warning
# I wrote this with scipy 0.9,
# convergence behavior of scipy 0.11 is different,
# fails at a different case, but is successful where it failed before
pow_ = 0.69219411243824214 # from previous function
nip = smp.NormalIndPower()
# using nobs, has one backup (fsolve)
nip.start_bqexp['nobs1'] = {'upp': 50, 'low': -20}
val = nip.solve_power(0.1, nobs1=None, alpha=0.01, power=pow_, ratio=1,
alternative='larger')
import scipy
if scipy.__version__ < '0.10':
assert_almost_equal(val, 1600, decimal=4)
assert_equal(nip.cache_fit_res[0], 1)
assert_equal(len(nip.cache_fit_res), 3)
# case that has convergence failure, and should warn
nip.start_ttp['nobs1'] = np.nan
from statsmodels.tools.sm_exceptions import ConvergenceWarning
assert_warns(ConvergenceWarning, nip.solve_power, 0.1, nobs1=None,
alpha=0.01, power=pow_, ratio=1, alternative='larger')
# this converges with scipy 0.11 ???
# nip.solve_power(0.1, nobs1=None, alpha=0.01, power=pow_, ratio=1, alternative='larger')
with warnings.catch_warnings(): # python >= 2.6
warnings.simplefilter("ignore")
val = nip.solve_power(0.1, nobs1=None, alpha=0.01, power=pow_, ratio=1,
alternative='larger')
assert_equal(nip.cache_fit_res[0], 0)
assert_equal(len(nip.cache_fit_res), 3)
if __name__ == '__main__':
test_normal_power_explicit()
nt = TestNormalIndPower1()
nt.test_power()
nt.test_roots()
nt = TestNormalIndPower_onesamp1()
nt.test_power()
nt.test_roots()
|
bsd-3-clause
|
nelson-liu/scikit-learn
|
examples/cluster/plot_kmeans_assumptions.py
|
270
|
2040
|
"""
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
|
bsd-3-clause
|
PanDAWMS/panda-jedi
|
pandajedi/jediddm/AtlasDDMClient.py
|
1
|
63500
|
import re
import os
import sys
import datetime
import json
import traceback
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
from six import iteritems
from pandajedi.jedicore.MsgWrapper import MsgWrapper
from .DDMClientBase import DDMClientBase
from rucio.client import Client as RucioClient
from rucio.common.exception import UnsupportedOperation,DataIdentifierNotFound,DataIdentifierAlreadyExists,\
DuplicateRule,DuplicateContent, InvalidObject
from pandaserver.dataservice import DataServiceUtils
# logger
from pandacommon.pandalogger.PandaLogger import PandaLogger
logger = PandaLogger().getLogger(__name__.split('.')[-1])
# class to access to ATLAS DDM
class AtlasDDMClient(DDMClientBase):
# constructor
def __init__(self,con):
# initialize base class
DDMClientBase.__init__(self,con)
        # the list of fatal errors
self.fatalErrors = []
# list of blacklisted endpoints
self.blackListEndPoints = []
# time of last update for blacklist
self.lastUpdateBL = None
        # how frequently to refresh the blacklist
self.timeIntervalBL = datetime.timedelta(seconds=60*10)
# dict of endpoints
self.endPointDict = {}
# time of last update for endpoint dict
self.lastUpdateEP = None
        # how frequently to refresh the endpoint dict
self.timeIntervalEP = datetime.timedelta(seconds=60*10)
# pid
self.pid = os.getpid()
# get a parsed certificate DN
def parse_dn(self, tmpDN):
if tmpDN is not None:
tmpDN = re.sub('/CN=limited proxy','',tmpDN)
tmpDN = re.sub('(/CN=proxy)+$', '', tmpDN)
# tmpDN = re.sub('(/CN=\d+)+$', '', tmpDN)
return tmpDN
# get files in dataset
def getFilesInDataset(self, datasetName, getNumEvents=False, skipDuplicate=True,
ignoreUnknown=False, longFormat=False, lfn_only=False):
methodName = 'getFilesInDataset'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <datasetName={0}>'.format(datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
# get Rucio API
client = RucioClient()
# extract scope from dataset
scope,dsn = self.extract_scope(datasetName)
if dsn.endswith('/'):
dsn = dsn[:-1]
# get files
fileMap = {}
baseLFNmap = {}
fileSet = set()
for x in client.list_files(scope, dsn, long=longFormat):
# convert to old dict format
lfn = str(x['name'])
if lfn_only:
fileSet.add(lfn)
continue
attrs = {}
attrs['lfn'] = lfn
attrs['chksum'] = "ad:" + str(x['adler32'])
attrs['md5sum'] = attrs['chksum']
attrs['checksum'] = attrs['chksum']
attrs['fsize'] = x['bytes']
attrs['filesize'] = attrs['fsize']
attrs['scope'] = str(x['scope'])
attrs['events'] = str(x['events'])
if longFormat:
attrs['lumiblocknr'] = str(x['lumiblocknr'])
guid = str('%s-%s-%s-%s-%s' % (x['guid'][0:8], x['guid'][8:12], x['guid'][12:16], x['guid'][16:20], x['guid'][20:32]))
attrs['guid'] = guid
# skip duplicated files
if skipDuplicate:
# extract base LFN and attempt number
baseLFN = re.sub('(\.(\d+))$','',lfn)
attNr = re.sub(baseLFN+'\.*','',lfn)
if attNr == '':
# without attempt number
attNr = -1
else:
attNr = int(attNr)
# compare attempt numbers
addMap = False
if baseLFN in baseLFNmap:
# use larger attempt number
oldMap = baseLFNmap[baseLFN]
if oldMap['attNr'] < attNr:
del fileMap[oldMap['guid']]
addMap = True
else:
addMap = True
# append
if not addMap:
continue
baseLFNmap[baseLFN] = {'guid':guid,
'attNr':attNr}
fileMap[guid] = attrs
tmpLog.debug('done')
if lfn_only:
return self.SC_SUCCEEDED, fileSet
return self.SC_SUCCEEDED, fileMap
except DataIdentifierNotFound as e:
if ignoreUnknown:
return self.SC_SUCCEEDED, {}
errType = e
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode,'{0} : {1}'.format(methodName, errMsg)
# list dataset replicas
def listDatasetReplicas(self, datasetName, use_vp=False, detailed=False):
methodName = 'listDatasetReplicas'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <datasetName={0}>'.format(datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
if not datasetName.endswith('/'):
                # dataset (not a container): get replicas directly
tmpRet = self.convertOutListDatasetReplicas(datasetName, use_vp=use_vp)
tmpLog.debug('got new '+str(tmpRet))
if detailed:
return self.SC_SUCCEEDED, tmpRet, {datasetName: tmpRet}
return self.SC_SUCCEEDED, tmpRet
else:
# list of attributes summed up
retMap = {}
detailedRetMap = {}
# get constituent datasets
tmpS,dsList = self.listDatasetsInContainer(datasetName)
totalFiles = 0
grandTotal = 0
for tmpName in dsList:
tmpLog.debug(tmpName)
tmp_status, tmp_output = self.getDatasetMetaData(tmpName)
if tmp_status != self.SC_SUCCEEDED:
raise RuntimeError('failed to get metadata with {0}'.format(tmp_output))
try:
totalFiles = tmp_output['length']
if not totalFiles:
totalFiles = 0
except Exception:
totalFiles = 0
tmpRet = self.convertOutListDatasetReplicas(tmpName, use_vp=use_vp)
detailedRetMap[tmpName] = tmpRet
# loop over all sites
for tmpSite,tmpValMap in iteritems(tmpRet):
# add site
retMap.setdefault(tmpSite, [{'found': 0}])
# sum
try:
retMap[tmpSite][-1]['found'] += int(tmpValMap[-1]['found'])
except Exception:
pass
# total
try:
if totalFiles < int(tmpValMap[-1]['total']):
totalFiles = int(tmpValMap[-1]['total'])
except Exception:
pass
grandTotal += totalFiles
# set total
for tmpSite in retMap.keys():
retMap[tmpSite][-1]['total'] = grandTotal
# return
tmpLog.debug('got '+str(retMap))
if detailed:
return self.SC_SUCCEEDED, retMap, detailedRetMap
return self.SC_SUCCEEDED, retMap
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg + traceback.format_exc())
if detailed:
return errCode, '{0} : {1}'.format(methodName, errMsg), None
return errCode, '{0} : {1}'.format(methodName, errMsg)
# list replicas per dataset
def listReplicasPerDataset(self,datasetName,deepScan=False):
methodName = 'listReplicasPerDataset'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <datasetName={0}>'.format(datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start with deepScan={0}'.format(deepScan))
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
datasets = []
if not datasetName.endswith('/'):
datasets = [dsn]
else:
# get constituent datasets
itr = client.list_content(scope,dsn)
datasets = [i['name'] for i in itr]
retMap = {}
for tmpName in datasets:
retMap[tmpName] = self.convertOutListDatasetReplicas(tmpName,deepScan)
tmpLog.debug('got '+str(retMap))
return self.SC_SUCCEEDED,retMap
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
# get site property
def getSiteProperty(self,seName,attribute):
methodName = 'getSiteProperty'
methodName += ' pid={0}'.format(self.pid)
self.updateEndPointDict()
try:
retVal = self.endPointDict[seName][attribute]
return self.SC_SUCCEEDED,retVal
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
return errCode, '{0} : {1}'.format(methodName, errMsg)
# get site alternateName
def getSiteAlternateName(self,se_name):
self.updateEndPointDict()
if se_name in self.endPointDict:
return [self.endPointDict[se_name]['site']]
return None
# get associated endpoints
def getAssociatedEndpoints(self,altName):
self.updateEndPointDict()
epList = []
for seName,seVal in iteritems(self.endPointDict):
if seVal['site'] == altName:
epList.append(seName)
return epList
# convert token to endpoint
def convertTokenToEndpoint(self,baseSeName,token):
self.updateEndPointDict()
try:
altName = self.getSiteAlternateName(baseSeName)[0]
if altName is not None:
for seName,seVal in iteritems(self.endPointDict):
if seVal['site'] == altName:
# space token
if seVal['token'] == token:
return seName
# pattern matching
if re.search(token,seName) is not None:
return seName
except Exception:
pass
return None
# get cloud for an endpoint
def getCloudForEndPoint(self,endPoint):
self.updateEndPointDict()
if endPoint in self.endPointDict:
return self.endPointDict[endPoint]['cloud']
return None
# check if endpoint is NG
def checkNGEndPoint(self,endPoint,ngList):
for ngPatt in ngList:
if re.search(ngPatt, endPoint) is not None:
return True
return False
def SiteHasCompleteReplica(self, dataset_replica_map, endpoint, total_files_in_dataset):
"""
        Checks that the number of found files at the site equals the total number of
        files at the site and covers all files in the dataset. A VP (virtual placement)
        replica is regarded as complete.
        :return: True or False
"""
try:
if 'vp' in dataset_replica_map[endpoint][-1]:
if dataset_replica_map[endpoint][-1]['vp']:
return True
found_tmp = dataset_replica_map[endpoint][-1]['found']
total_tmp = dataset_replica_map[endpoint][-1]['total']
if found_tmp is not None and total_tmp == found_tmp and total_tmp >= total_files_in_dataset:
return True
except KeyError:
pass
return False
def getAvailableFiles(self, dataset_spec, site_endpoint_map, site_mapper, check_LFC=False,
check_completeness=True, storage_token=None, complete_only=False,
use_vp=True, file_scan_in_container=True):
"""
:param dataset_spec: dataset spec object
:param site_endpoint_map: panda sites to ddm endpoints map. The list of panda sites includes the ones to scan
:param site_mapper: site mapper object
:param check_LFC: check/ask Tadashi/probably obsolete
:param check_completeness:
:param storage_token:
:param complete_only: check only for complete replicas
:param use_vp: use virtual placement
:param file_scan_in_container: enable file lookup for container
TODO: do we need NG, do we need alternate names
TODO: the storage_token is not used anymore
:return:
"""
# make logger
method_name = 'getAvailableFiles'
method_name += ' pid={0}'.format(self.pid)
method_name += ' < jediTaskID={0} datasetID={1} >'.format(dataset_spec.jediTaskID, dataset_spec.datasetID)
tmp_log = MsgWrapper(logger, method_name)
loopStart = datetime.datetime.utcnow()
try:
tmp_log.debug('start datasetName={} check_completeness={} nFiles={} nSites={} '
'complete_only={}'.format(
dataset_spec.datasetName,
check_completeness,
len(dataset_spec.Files),
len(site_endpoint_map),
complete_only))
# update the definition of all endpoints from AGIS
self.updateEndPointDict()
# get the file map
tmp_status, tmp_output = self.getDatasetMetaData(dataset_spec.datasetName)
if tmp_status != self.SC_SUCCEEDED:
regTime = datetime.datetime.utcnow() - loopStart
tmp_log.error('failed in {} sec to get metadata with {}'.format(regTime.seconds, tmp_output))
return tmp_status, tmp_output
total_files_in_dataset = tmp_output['length']
if total_files_in_dataset is None:
total_files_in_dataset = 0
if tmp_output['did_type'] == 'CONTAINER':
is_container = True
else:
is_container = False
# get the dataset replica map
tmp_status, tmp_output, detailed_replica_map = self.listDatasetReplicas(dataset_spec.datasetName,
use_vp=use_vp,
detailed=True)
if tmp_status != self.SC_SUCCEEDED:
regTime = datetime.datetime.utcnow() - loopStart
tmp_log.error('failed in {} sec to get dataset replicas with {}'.format(regTime.seconds, tmp_output))
return tmp_status, tmp_output
dataset_replica_map = tmp_output
# collect GUIDs and LFNs
file_map = {} # GUID to LFN
lfn_filespec_map = {} # LFN to file spec
scope_map = {} # LFN to scope list
for tmp_file in dataset_spec.Files:
file_map[tmp_file.GUID] = tmp_file.lfn
lfn_filespec_map.setdefault(tmp_file.lfn, [])
lfn_filespec_map[tmp_file.lfn].append(tmp_file)
scope_map[tmp_file.lfn] = tmp_file.scope
complete_replica_map = {}
endpoint_storagetype_map = {}
rse_list = []
# figure out complete replicas and storage types
for site_name, endpoint_list in iteritems(site_endpoint_map):
tmp_site_spec = site_mapper.getSite(site_name)
has_complete = False
tmp_rse_list = []
# loop over all endpoints
for endpoint in endpoint_list:
# storage type
tmp_status, is_tape = self.getSiteProperty(endpoint, 'is_tape')
if is_tape:
storage_type = 'localtape'
else:
storage_type = 'localdisk'
if self.SiteHasCompleteReplica(dataset_replica_map, endpoint, total_files_in_dataset) \
or (endpoint in dataset_replica_map and not check_completeness) \
or DataServiceUtils.isCachedFile(dataset_spec.datasetName, tmp_site_spec):
complete_replica_map[endpoint] = storage_type
has_complete = True
# no scan for many-time datasets or disabled completeness check
if dataset_spec.isManyTime() \
or (not check_completeness and endpoint not in dataset_replica_map) \
or complete_only:
continue
# disable file lookup if unnecessary
if endpoint not in rse_list and (file_scan_in_container or not is_container):
tmp_rse_list.append(endpoint)
endpoint_storagetype_map[endpoint] = storage_type
# add to list to trigger file lookup if complete replica is unavailable
if not has_complete and tmp_rse_list:
rse_list += tmp_rse_list
# get the file locations from Rucio
if len(rse_list) > 0:
tmp_log.debug('lookup file replicas in Rucio for RSEs: {0}'.format(rse_list))
tmp_status, rucio_lfn_to_rse_map = self.jedi_list_replicas(file_map, rse_list, scopes=scope_map)
tmp_log.debug('lookup file replicas return status: {0}'.format(str(tmp_status)))
if tmp_status != self.SC_SUCCEEDED:
raise RuntimeError(rucio_lfn_to_rse_map)
else:
rucio_lfn_to_rse_map = dict()
if (not file_scan_in_container and is_container):
# make file list from detailed replica map
files_in_container = {}
for tmp_ds_name in detailed_replica_map.keys():
tmp_status, tmp_files = self.getFilesInDataset(tmp_ds_name, ignoreUnknown=True, lfn_only=True)
if tmp_status != self.SC_SUCCEEDED:
raise RuntimeError(tmp_files)
for tmp_lfn in tmp_files:
files_in_container[tmp_lfn] = tmp_ds_name
for tmp_file in dataset_spec.Files:
if tmp_file.lfn in files_in_container and \
files_in_container[tmp_file.lfn] in detailed_replica_map:
rucio_lfn_to_rse_map[tmp_file.lfn] = detailed_replica_map[files_in_container[tmp_file.lfn]]
# initialize the return map and add complete/cached replicas
return_map = {}
checked_dst = set()
for site_name, tmp_endpoints in iteritems(site_endpoint_map):
return_map.setdefault(site_name, {'localdisk': [], 'localtape': [], 'cache': [], 'remote': []})
tmp_site_spec = site_mapper.getSite(site_name)
# check if the dataset is cached
if DataServiceUtils.isCachedFile(dataset_spec.datasetName, tmp_site_spec):
# add to cached file list
return_map[site_name]['cache'] += dataset_spec.Files
# complete replicas
if not check_LFC:
for tmp_endpoint in tmp_endpoints:
if tmp_endpoint in complete_replica_map:
storage_type = complete_replica_map[tmp_endpoint]
return_map[site_name][storage_type] += dataset_spec.Files
checked_dst.add(site_name)
# loop over all available LFNs
available_lfns = list(rucio_lfn_to_rse_map.keys())
available_lfns.sort()
for tmp_lfn in available_lfns:
tmp_filespec_list = lfn_filespec_map[tmp_lfn]
tmp_filespec = lfn_filespec_map[tmp_lfn][0]
for site in site_endpoint_map:
for endpoint in site_endpoint_map[site]:
if endpoint in rucio_lfn_to_rse_map[tmp_lfn] and endpoint in endpoint_storagetype_map:
storage_type = endpoint_storagetype_map[endpoint]
if tmp_filespec not in return_map[site][storage_type]:
return_map[site][storage_type] += tmp_filespec_list
checked_dst.add(site)
break
# aggregate all types of storage types into the 'all' key
for site, storage_type_files in iteritems(return_map):
site_all_file_list = set()
for storage_type, file_list in iteritems(storage_type_files):
for tmp_file_spec in file_list:
site_all_file_list.add(tmp_file_spec)
storage_type_files['all'] = site_all_file_list
# dump for logging
logging_str = ''
for site, storage_type_file in iteritems(return_map):
logging_str += '{0}:('.format(site)
for storage_type, file_list in iteritems(storage_type_file):
logging_str += '{0}:{1},'.format(storage_type, len(file_list))
logging_str = logging_str[:-1]
logging_str += ') '
logging_str = logging_str[:-1]
tmp_log.debug(logging_str)
# return
regTime = datetime.datetime.utcnow() - loopStart
tmp_log.debug('done in {} sec'.format(regTime.seconds))
return self.SC_SUCCEEDED, return_map
except Exception as e:
regTime = datetime.datetime.utcnow() - loopStart
error_message = 'failed in {} sec with {} {} '.format(regTime.seconds, str(e), traceback.format_exc())
tmp_log.error(error_message)
return self.SC_FAILED, '{0}.{1} {2}'.format(self.__class__.__name__, method_name, error_message)
def jedi_list_replicas(self, files, storages, scopes={}):
try:
method_name = 'jedi_list_replicas'
method_name += ' pid={0}'.format(self.pid)
tmp_log = MsgWrapper(logger, method_name)
client = RucioClient()
i_guid = 0
max_guid = 1000 # do 1000 guids in each Rucio call
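            # Editorial note: the DIDs are sent in batches, presumably to keep each
            # Rucio list_replicas call small; a batch is flushed every max_guid files
            # and once more for the final partial batch.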
lfn_to_rses_map = {}
dids = []
i_loop = 0
startTime = datetime.datetime.utcnow()
tmp_log.debug('start')
for guid, lfn in iteritems(files):
i_guid += 1
scope = scopes[lfn]
dids.append({'scope': scope, 'name': lfn})
if len(dids) % max_guid == 0 or i_guid == len(files):
i_loop += 1
tmp_log.debug('lookup {} start'.format(i_loop))
loopStart = datetime.datetime.utcnow()
x = client.list_replicas(dids, ['srm', 'gsiftp'], resolve_archives=True)
regTime = datetime.datetime.utcnow() - loopStart
tmp_log.info('rucio.list_replicas took {0} sec for {1} files'.format(regTime.seconds, len(dids)))
loopStart = datetime.datetime.utcnow()
for tmp_dict in x:
try:
tmp_LFN = str(tmp_dict['name'])
lfn_to_rses_map[tmp_LFN] = tmp_dict['rses']
except Exception:
pass
# reset the dids list for the next bulk for Rucio
dids = []
regTime = datetime.datetime.utcnow() - loopStart
tmp_log.debug('lookup {} end in {} sec'.format(i_loop, regTime.seconds))
regTime = datetime.datetime.utcnow() - startTime
tmp_log.debug('end in {} sec'.format(regTime.seconds))
except Exception as e:
regTime = datetime.datetime.utcnow() - startTime
tmp_log.error('failed in {} sec'.format(regTime.seconds))
return self.SC_FAILED, "file lookup failed with {} {}".format(str(e), traceback.format_exc())
return self.SC_SUCCEEDED, lfn_to_rses_map
# list file replicas with dataset name/scope
def jedi_list_replicas_with_dataset(self, datasetName):
try:
scope, dsn = self.extract_scope(datasetName)
client = RucioClient()
lfn_to_rses_map = {}
dids = []
dids = [{'scope': scope, 'name': dsn}]
for tmp_dict in client.list_replicas(dids, ['srm', 'gsiftp'], resolve_archives=True):
try:
tmp_LFN = str(tmp_dict['name'])
except Exception:
continue
lfn_to_rses_map[tmp_LFN] = tmp_dict['rses']
except Exception:
err_type, err_value = sys.exc_info()[:2]
return self.SC_FAILED, "file lookup failed with {0}:{1} {2}".format(err_type, err_value, traceback.format_exc())
return self.SC_SUCCEEDED, lfn_to_rses_map
# get dataset metadata
def getDatasetMetaData(self, datasetName, ignore_missing=False):
# make logger
methodName = 'getDatasetMetaData'
methodName += ' pid={0}'.format(self.pid)
methodName = '{0} datasetName={1}'.format(methodName,datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# get
if dsn.endswith('/'):
dsn = dsn[:-1]
tmpRet = client.get_metadata(scope,dsn)
# set state
if tmpRet['is_open'] is True and tmpRet['did_type'] != 'CONTAINER':
tmpRet['state'] = 'open'
else:
tmpRet['state'] = 'closed'
tmpLog.debug(str(tmpRet))
return self.SC_SUCCEEDED,tmpRet
except DataIdentifierNotFound as e:
errType = e
errCode, errMsg = self.checkError(errType)
if ignore_missing:
tmpLog.debug(errMsg)
tmpRet = {}
tmpRet['state'] = 'missing'
return self.SC_SUCCEEDED, tmpRet
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
# check error
def checkError(self,errType):
errMsg = '{} : {}'.format(str(type(errType)), str(errType))
if type(errType) in self.fatalErrors:
# fatal error
return self.SC_FATAL, errMsg
else:
# temporary error
return self.SC_FAILED, errMsg
# list dataset/container
def listDatasets(self,datasetName,ignorePandaDS=True):
methodName = 'listDatasets'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <datasetName={0}>'.format(datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
filters = {}
if dsn.endswith('/'):
dsn = dsn[:-1]
filters['name'] = dsn
dsList = set()
for name in client.list_dids(scope, filters, 'dataset'):
dsList.add('%s:%s' % (scope, name))
for name in client.list_dids(scope, filters, 'container'):
dsList.add('%s:%s/' % (scope, name))
dsList = list(dsList)
# ignore panda internal datasets
if ignorePandaDS:
tmpDsList = []
for tmpDS in dsList:
                    if re.search('_dis\d+$', tmpDS) is not None or re.search('_sub\d+$', tmpDS) is not None:
continue
tmpDsList.append(tmpDS)
dsList = tmpDsList
tmpLog.debug('got '+str(dsList))
return self.SC_SUCCEEDED,dsList
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
# register new dataset/container
def registerNewDataset(self,datasetName,backEnd='rucio',location=None,lifetime=None,metaData=None,resurrect=False):
methodName = 'registerNewDataset'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <datasetName={0}>'.format(datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start location={0} lifetime={1}'.format(location,lifetime))
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# lifetime
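            # Editorial note: lifetime is passed in days and multiplied by 86400 below,
            # apparently because the Rucio client expects seconds.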
if lifetime is not None:
lifetime=lifetime*86400
# register
if not datasetName.endswith('/'):
# register dataset
name = dsn
client.add_dataset(scope,name,meta=metaData,lifetime=lifetime,rse=location)
else:
# register container
name = dsn
client.add_container(scope=scope,name=name)
except DataIdentifierAlreadyExists:
pass
except InvalidObject as e:
errMsg = '{} : {}'.format(InvalidObject, str(e))
tmpLog.error(errMsg)
return self.SC_FATAL, '{0} : {1}'.format(methodName, errMsg)
except Exception as e:
errType = e
resurrected = False
# try to resurrect
if 'DELETED_DIDS_PK violated' in str(errType) and resurrect:
try:
client.resurrect([{'scope': scope, 'name': name}])
resurrected = True
except Exception:
pass
if not resurrected:
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode,'{0} : {1}'.format(methodName,errMsg)
tmpLog.debug('done')
return self.SC_SUCCEEDED,True
# wrapper for list_content
def wp_list_content(self,client,scope,dsn):
if dsn.endswith('/'):
dsn = dsn[:-1]
retList = []
# get contents
for data in client.list_content(scope,dsn):
if data['type'] == 'CONTAINER':
retList += self.wp_list_content(client,data['scope'],data['name'])
elif data['type'] == 'DATASET':
retList.append('{0}:{1}'.format(data['scope'],data['name']))
else:
pass
return retList
# list datasets in container
def listDatasetsInContainer(self,containerName):
methodName = 'listDatasetsInContainer'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <containerName={0}>'.format(containerName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
# get rucio
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(containerName)
# get contents
dsList = self.wp_list_content(client,scope,dsn)
tmpLog.debug('got '+str(dsList))
return self.SC_SUCCEEDED,dsList
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
# expand Container
def expandContainer(self,containerName):
methodName = 'expandContainer'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <contName={0}>'.format(containerName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
dsList = []
# get real names
tmpS,tmpRealNameList = self.listDatasets(containerName)
if tmpS != self.SC_SUCCEEDED:
tmpLog.error('failed to get real names')
return tmpS,tmpRealNameList
# loop over all names
for tmpRealName in tmpRealNameList:
# container
if tmpRealName.endswith('/'):
# get contents
tmpS,tmpO = self.listDatasetsInContainer(tmpRealName)
if tmpS != self.SC_SUCCEEDED:
tmpLog.error('failed to get datasets in {0}'.format(tmpRealName))
return tmpS,tmpO
else:
tmpO = [tmpRealName]
# collect dataset names
for tmpStr in tmpO:
if tmpStr not in dsList:
dsList.append(tmpStr)
dsList.sort()
# return
tmpLog.debug('got {0}'.format(str(dsList)))
return self.SC_SUCCEEDED,dsList
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
    # add datasets to container
def addDatasetsToContainer(self,containerName,datasetNames,backEnd='rucio'):
methodName = 'addDatasetsToContainer'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <contName={0}>'.format(containerName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
# get Rucio API
client = RucioClient()
c_scope,c_name = self.extract_scope(containerName)
if c_name.endswith('/'):
c_name = c_name[:-1]
dsns = []
for ds in datasetNames:
ds_scope, ds_name = self.extract_scope(ds)
dsn = {'scope': ds_scope, 'name': ds_name}
dsns.append(dsn)
try:
# add datasets
client.add_datasets_to_container(scope=c_scope, name=c_name, dsns=dsns)
except DuplicateContent:
# add datasets one by one
for ds in dsns:
try:
client.add_datasets_to_container(scope=c_scope, name=c_name, dsns=[ds])
except DuplicateContent:
pass
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('done')
return self.SC_SUCCEEDED,True
# get latest DBRelease
def getLatestDBRelease(self):
methodName = 'getLatestDBRelease'
methodName += ' pid={0}'.format(self.pid)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('trying to get the latest version number of DBR')
# get ddo datasets
tmpStat,ddoDatasets = self.listDatasets('ddo.*')
        if tmpStat != self.SC_SUCCEEDED or not ddoDatasets:
tmpLog.error('failed to get a list of DBRelease datasets from DDM')
return self.SC_FAILED,None
# reverse sort to avoid redundant lookup
ddoDatasets.sort()
ddoDatasets.reverse()
# extract version number
latestVerMajor = 0
latestVerMinor = 0
latestVerBuild = 0
latestVerRev = 0
latestDBR = ''
for tmpName in ddoDatasets:
# ignore CDRelease
if ".CDRelease." in tmpName:
continue
# ignore user
if tmpName.startswith('ddo.user'):
continue
# use Atlas.Ideal
if ".Atlas.Ideal." not in tmpName:
continue
match = re.search('\.v(\d+)(_*[^\.]*)$',tmpName)
if match is None:
tmpLog.warning('cannot extract version number from %s' % tmpName)
continue
# ignore special DBRs
if match.group(2) != '':
continue
# get major,minor,build,revision numbers
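            # Editorial example: a hypothetical name ending in '.v310801' gives
            # tmpVerStr='310801' -> major=31, minor=8, build=1 and no revision part.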
tmpVerStr = match.group(1)
tmpVerMajor = 0
tmpVerMinor = 0
tmpVerBuild = 0
tmpVerRev = 0
try:
tmpVerMajor = int(tmpVerStr[0:2])
except Exception:
pass
try:
tmpVerMinor = int(tmpVerStr[2:4])
except Exception:
pass
try:
tmpVerBuild = int(tmpVerStr[4:6])
except Exception:
pass
try:
tmpVerRev = int(tmpVerStr[6:])
                # only use DBRs with three two-digit fields; skip names that also carry a revision part
continue
except Exception:
pass
# compare
if latestVerMajor > tmpVerMajor:
continue
elif latestVerMajor == tmpVerMajor:
if latestVerMinor > tmpVerMinor:
continue
elif latestVerMinor == tmpVerMinor:
if latestVerBuild > tmpVerBuild:
continue
elif latestVerBuild == tmpVerBuild:
if latestVerRev > tmpVerRev:
continue
# check if well replicated
tmpStat,ddoReplicas = self.listDatasetReplicas(tmpName)
            if not ddoReplicas:
continue
# higher or equal version
latestVerMajor = tmpVerMajor
latestVerMinor = tmpVerMinor
latestVerBuild = tmpVerBuild
latestVerRev = tmpVerRev
latestDBR = tmpName
# failed
if latestDBR == '':
tmpLog.error('failed to get the latest version of DBRelease dataset from DDM')
return self.SC_FAILED,None
tmpLog.debug('use {0}'.format(latestDBR))
return self.SC_SUCCEEDED,latestDBR
# freeze dataset
def freezeDataset(self,datasetName,ignoreUnknown=False):
methodName = 'freezeDataset'
methodName += ' pid={0}'.format(self.pid)
methodName = '{0} datasetName={1}'.format(methodName,datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
isOK = True
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# check metadata to avoid a bug in rucio
if dsn.endswith('/'):
dsn = dsn[:-1]
tmpRet = client.get_metadata(scope,dsn)
# close
client.set_status(scope,dsn,open=False)
except UnsupportedOperation:
pass
except DataIdentifierNotFound as e:
errType = e
if ignoreUnknown:
pass
else:
isOK = False
except Exception as e:
errType = e
isOK = False
if isOK:
tmpLog.debug('done')
return self.SC_SUCCEEDED,True
else:
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
# finger
def finger(self,userName):
methodName = 'finger'
methodName += ' pid={0}'.format(self.pid)
methodName = '{0} userName={1}'.format(methodName,userName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
# cleanup DN
userName = self.parse_dn(userName)
# get rucio API
client = RucioClient()
userInfo = None
for accType in ['USER', 'GROUP']:
for i in client.list_accounts(account_type=accType, identity=userName):
userInfo = {'nickname':i['account'],
'email':i['email']}
break
if userInfo is None:
# remove /CN=\d
userName = re.sub('(/CN=\d+)+$','',userName)
for i in client.list_accounts(account_type=accType, identity=userName):
userInfo = {'nickname':i['account'],
'email':i['email']}
break
try:
if userInfo is None:
i = client.get_account(userName)
userInfo = {'nickname': i['account'],
'email': i['email']}
except Exception:
pass
if userInfo is not None:
break
if userInfo is None:
tmpLog.error('failed to get account info')
return self.SC_FAILED,None
tmpRet = userInfo
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('done with '+str(tmpRet))
return self.SC_SUCCEEDED,tmpRet
# set dataset metadata
def setDatasetMetadata(self,datasetName,metadataName,metadaValue):
methodName = 'setDatasetMetadata'
methodName += ' pid={0}'.format(self.pid)
methodName = '{0} datasetName={1} metadataName={2} metadaValue={3}'.format(methodName,datasetName,
metadataName,metadaValue)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# set
client.set_metadata(scope,dsn,metadataName,metadaValue)
except (UnsupportedOperation,DataIdentifierNotFound):
pass
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('done')
return self.SC_SUCCEEDED,True
# register location
def registerDatasetLocation(self,datasetName,location,lifetime=None,owner=None,backEnd='rucio',
activity=None,grouping=None,weight=None,copies=1,
ignore_availability=True):
methodName = 'registerDatasetLocation'
methodName += ' pid={0}'.format(self.pid)
methodName = '{0} datasetName={1} location={2}'.format(methodName,datasetName,location)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
# get rucio API
client = RucioClient()
# cleanup DN
owner = self.parse_dn(owner)
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# lifetime
if lifetime is not None:
lifetime = lifetime * 86400
elif 'SCRATCHDISK' in location:
lifetime = 14 * 86400
# get owner
if owner is not None:
tmpStat,userInfo = self.finger(owner)
if tmpStat != self.SC_SUCCEEDED:
raise RuntimeError('failed to get nickname for {0}'.format(owner))
owner = userInfo['nickname']
else:
owner = client.account
if grouping is None:
grouping = 'DATASET'
# add rule
dids = []
did = {'scope': scope, 'name': dsn}
dids.append(did)
locList = location.split(',')
for tmpLoc in locList:
client.add_replication_rule(dids=dids,copies=copies,rse_expression=tmpLoc,lifetime=lifetime,
grouping=grouping,account=owner,locked=False,notify='N',
ignore_availability=ignore_availability,activity=activity,
weight=weight)
except DuplicateRule:
pass
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('done')
return self.SC_SUCCEEDED,True
# delete dataset
def deleteDataset(self,datasetName,emptyOnly,ignoreUnknown=False):
methodName = 'deleteDataset'
methodName += ' pid={0}'.format(self.pid)
methodName = '{0} datasetName={1}'.format(methodName,datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
isOK = True
retStr = ''
nFiles = -1
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# get the number of files
if emptyOnly:
nFiles = 0
for x in client.list_files(scope, dsn):
nFiles += 1
# erase
if not emptyOnly or nFiles == 0:
client.set_metadata(scope=scope, name=dsn, key='lifetime', value=0.0001)
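                # Editorial note: setting a very short lifetime is assumed to let the
                # Rucio expiration machinery remove the dataset asynchronously instead
                # of deleting it in this call.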
retStr = 'deleted {0}'.format(datasetName)
else:
retStr = 'keep {0} where {1} files are available'.format(datasetName,nFiles)
except DataIdentifierNotFound as e:
errType = e
if ignoreUnknown:
pass
else:
isOK = False
except Exception as e:
isOK = False
errType = e
if isOK:
tmpLog.debug('done')
return self.SC_SUCCEEDED,retStr
else:
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode,'{0} : {1}'.format(methodName, errMsg)
# register subscription
def registerDatasetSubscription(self,datasetName,location,activity,lifetime=None,
asynchronous=False):
methodName = 'registerDatasetSubscription'
methodName += ' pid={0}'.format(self.pid)
methodName = '{0} datasetName={1} location={2} activity={3} asyn={4}'.format(methodName,datasetName,
location,activity,asynchronous)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
isOK = True
try:
if lifetime is not None:
lifetime = lifetime*24*60*60
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
dids = [{'scope': scope, 'name': dsn}]
# check if a replication rule already exists
for rule in client.list_did_rules(scope=scope, name=dsn):
if (rule['rse_expression'] == location) and (rule['account'] == client.account):
                    return self.SC_SUCCEEDED, True
client.add_replication_rule(dids=dids,copies=1,rse_expression=location,weight=None,
lifetime=lifetime, grouping='DATASET', account=client.account,
locked=False, notify='N',ignore_availability=True,
activity=activity,asynchronous=asynchronous)
except DuplicateRule:
pass
except DataIdentifierNotFound:
pass
except Exception as e:
isOK = False
errType = e
if not isOK:
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('done')
return self.SC_SUCCEEDED,True
# find lost files
def findLostFiles(self, datasetName, fileMap):
methodName = 'findLostFiles'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <datasetName={0}>'.format(datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
try:
# get replicas
tmpStat,tmpOut = self.listDatasetReplicas(datasetName)
if tmpStat != self.SC_SUCCEEDED:
                tmpLog.error('failed to get dataset replicas with {0}'.format(tmpOut))
return tmpStat,tmpOut
# check if complete replica is available
hasCompReplica = False
datasetReplicaMap = tmpOut
for tmpEndPoint in datasetReplicaMap.keys():
if datasetReplicaMap[tmpEndPoint][-1]['found'] is not None and \
datasetReplicaMap[tmpEndPoint][-1]['total'] == datasetReplicaMap[tmpEndPoint][-1]['found']:
hasCompReplica = True
break
# no lost files
if hasCompReplica:
tmpLog.debug('done with no lost files')
return self.SC_SUCCEEDED,{}
# get LFNs and scopes
lfnMap = {}
scopeMap = {}
for tmpGUID in fileMap.keys():
tmpLFN = fileMap[tmpGUID]['lfn']
lfnMap[tmpGUID] = tmpLFN
scopeMap[tmpLFN] = fileMap[tmpGUID]['scope']
# get SURLs
seList = list(datasetReplicaMap.keys())
tmpStat, tmpRetMap = self.jedi_list_replicas_with_dataset(datasetName)
if tmpStat != self.SC_SUCCEEDED:
tmpLog.error('failed to get SURLs with {0}'.format(tmpRetMap))
return tmpStat,tmpRetMap
            # look for missing files
            lostFiles = {}
            for tmpGUID, tmpLFN in iteritems(lfnMap):
                if tmpLFN not in tmpRetMap:
                    lostFiles[tmpGUID] = tmpLFN
            tmpLog.debug('done with lost '+','.join(str(tmpLFN) for tmpLFN in lostFiles.values()))
            return self.SC_SUCCEEDED, lostFiles
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
# convert output of listDatasetReplicas
def convertOutListDatasetReplicas(self, datasetName, usefileLookup=False, use_vp=False):
retMap = {}
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# get replicas
itr = client.list_dataset_replicas(scope,dsn,deep=usefileLookup)
items = []
for item in itr:
if 'vp' not in item:
item['vp'] = False
items.append(item)
# deep lookup if shallow gave nothing
if items == [] and not usefileLookup:
itr = client.list_dataset_replicas(scope,dsn,deep=True)
for item in itr:
if 'vp' not in item:
item['vp'] = False
items.append(item)
# VP
if use_vp:
itr = client.list_dataset_replicas_vp(scope, dsn)
for item in itr:
if item['vp']:
# add dummy
if "length" not in item:
item["length"] = 1
if "available_length" not in item:
item["available_length"] = 1
if "bytes" not in item:
item["bytes"] = 1
if "available_bytes" not in item:
item["available_bytes"] = 1
if "site" in item and "rse" not in item:
item["rse"] = item["site"]
items.append(item)
for item in items:
rse = item["rse"]
retMap[rse] = [{'total':item["length"],
'found':item["available_length"],
'tsize':item["bytes"],
'asize':item["available_bytes"],
'vp': item["vp"],
'immutable':1}]
return retMap
# delete files from dataset
def deleteFilesFromDataset(self,datasetName,filesToDelete):
methodName = 'deleteFilesFromDataset'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <datasetName={0}>'.format(datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
isOK = True
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# open dataset
try:
client.set_status(scope,dsn,open=True)
except UnsupportedOperation:
pass
# exec
client.detach_dids(scope=scope, name=dsn, dids=filesToDelete)
except Exception as e:
isOK = False
errType = e
if not isOK:
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('done')
return self.SC_SUCCEEDED,True
# extract scope
def extract_scope(self, dsn):
if dsn.endswith('/'):
dsn = re.sub('/$', '', dsn)
if ':' in dsn:
return dsn.split(':')[:2]
scope = dsn.split('.')[0]
if dsn.startswith('user') or dsn.startswith('group'):
scope = ".".join(dsn.split('.')[0:2])
return scope,dsn
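    # Editorial examples for extract_scope, derived from the logic above:
    #   'myscope:some.name'     -> ['myscope', 'some.name']
    #   'data15_13TeV.X.Y/'     -> ('data15_13TeV', 'data15_13TeV.X.Y')
    #   'user.jdoe.test.0001/'  -> ('user.jdoe', 'user.jdoe.test.0001')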
# open dataset
def openDataset(self,datasetName):
methodName = 'openDataset'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <datasetName={0}>'.format(datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
isOK = True
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# open dataset
try:
client.set_status(scope,dsn,open=True)
except (UnsupportedOperation,DataIdentifierNotFound):
pass
except Exception as e:
isOK = False
errType = e
if not isOK:
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('done')
return self.SC_SUCCEEDED,True
    # update blacklist
def updateBlackList(self):
methodName = 'updateBlackList'
methodName += ' pid={0}'.format(self.pid)
tmpLog = MsgWrapper(logger,methodName)
        # check freshness
timeNow = datetime.datetime.utcnow()
if self.lastUpdateBL is not None and timeNow-self.lastUpdateBL < self.timeIntervalBL:
return
self.lastUpdateBL = timeNow
# get json
try:
tmpLog.debug('start')
with open('/cvmfs/atlas.cern.ch/repo/sw/local/etc/cric_ddmblacklisting.json') as f:
ddd = json.load(f)
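            # Editorial note: the JSON layout assumed by the lookup below is roughly
            #   {'RSE_NAME': {'write_wan': {'status': {'value': 'ON' or 'OFF', ...}, ...}, ...}, ...}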
self.blackListEndPoints = \
[k for k in ddd if 'write_wan' in ddd[k] and ddd[k]['write_wan']["status"]["value"] == 'OFF']
tmpLog.debug('{0} endpoints blacklisted'.format(len(self.blackListEndPoints)))
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
return
    # check if the endpoint is blacklisted
def isBlackListedEP(self,endPoint):
methodName = 'isBlackListedEP'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <endPoint={0}>'.format(endPoint)
tmpLog = MsgWrapper(logger,methodName)
try:
# update BL
self.updateBlackList()
if endPoint in self.blackListEndPoints:
return self.SC_SUCCEEDED,True
except Exception:
pass
return self.SC_SUCCEEDED,False
# get disk usage at RSE
def getRseUsage(self,rse,src='srm'):
methodName = 'getRseUsage'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <rse={0}>'.format(rse)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
retMap = {}
try:
# get rucio API
client = RucioClient()
# get info
itr = client.get_rse_usage(rse)
# look for srm
for item in itr:
if item['source'] == src:
try:
total = item['total']/1024/1024/1024
except Exception:
total = None
try:
used = item['used']/1024/1024/1024
except Exception:
used = None
try:
free = item['free']/1024/1024/1024
except Exception:
free = None
retMap = {'total':total,
'used':used,
'free':free}
break
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('done {0}'.format(str(retMap)))
return self.SC_SUCCEEDED,retMap
# update endpoint dict
def updateEndPointDict(self):
methodName = 'updateEndPointDict'
methodName += ' pid={0}'.format(self.pid)
tmpLog = MsgWrapper(logger,methodName)
# check freshness
timeNow = datetime.datetime.utcnow()
if self.lastUpdateEP is not None and timeNow-self.lastUpdateEP < self.timeIntervalEP:
return
self.lastUpdateEP = timeNow
# get json
try:
tmpLog.debug('start')
with open('/cvmfs/atlas.cern.ch/repo/sw/local/etc/cric_ddmendpoints.json') as f:
ddd = json.load(f)
self.endPointDict = {k: ddd[k] for k in ddd if ddd[k]['state'] == 'ACTIVE'}
tmpLog.debug('got {0} endpoints '.format(len(self.endPointDict)))
except Exception as e:
errStr = 'failed to update EP with {0}'.format(str(e))
tmpLog.error(errStr)
return
# check if the dataset is distributed
def isDistributedDataset(self,datasetName):
methodName = 'isDistributedDataset'
methodName += ' pid={0}'.format(self.pid)
methodName += ' <datasetName={0}>'.format(datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
isDDS = None
isOK = True
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# get rules
for rule in client.list_did_rules(scope,dsn):
if rule['grouping'] != 'NONE':
isDDS = False
break
elif isDDS is None:
isDDS = True
# use False when there is no rule
if isDDS is None:
isDDS = False
except Exception as e:
isOK = False
errType = e
if not isOK:
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('done with {0}'.format(isDDS))
return self.SC_SUCCEEDED,isDDS
# update replication rules
def updateReplicationRules(self,datasetName,dataMap):
methodName = 'updateReplicationRules'
methodName += ' pid={0}'.format(self.pid)
methodName = '{0} datasetName={1}'.format(methodName,datasetName)
tmpLog = MsgWrapper(logger,methodName)
tmpLog.debug('start')
isOK = True
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(datasetName)
# get rules
for rule in client.list_did_rules(scope=scope, name=dsn):
for dataKey,data in iteritems(dataMap):
if rule['rse_expression'] == dataKey or re.search(dataKey,rule['rse_expression']) is not None:
tmpLog.debug('set data={0} on {1}'.format(str(data),rule['rse_expression']))
client.update_replication_rule(rule['id'],data)
except DataIdentifierNotFound:
pass
except Exception as e:
isOK = False
errType = e
if not isOK:
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('done')
return self.SC_SUCCEEDED,True
# get active staging rule
def getActiveStagingRule(self, dataset_name):
methodName = 'getActiveStagingRule'
methodName += ' datasetName={0}'.format(dataset_name)
tmpLog = MsgWrapper(logger, methodName)
tmpLog.debug('start')
ruleID = None
try:
# get rucio API
client = RucioClient()
# get scope and name
scope,dsn = self.extract_scope(dataset_name)
# get rules
for rule in client.list_did_rules(scope=scope, name=dsn):
if rule['activity'] == 'Staging':
ruleID = rule['id']
break
except Exception as e:
errType = e
errCode, errMsg = self.checkError(errType)
tmpLog.error(errMsg)
return errCode, '{0} : {1}'.format(methodName, errMsg)
tmpLog.debug('got ruleID={0}'.format(ruleID))
return self.SC_SUCCEEDED, ruleID
|
apache-2.0
|
Stargrazer82301/CAAPR
|
CAAPR/CAAPR_AstroMagic/PTS/pts/modeling/analysis/heating/projected.py
|
1
|
18872
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.analysis.heating.projected Contains the ProjectedDustHeatingAnalyser class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import numpy as np
import matplotlib.pyplot as plt
# Import the relevant PTS classes and modules
from .component import DustHeatingAnalysisComponent
from ....core.tools import filesystem as fs
from ....core.tools.logging import log
from ....core.tools import tables, introspection
from ...core.sed import SED
from ....core.simulation.wavelengthgrid import WavelengthGrid
from ....magic.core.image import Image
from ....magic.plot.imagegrid import StandardImageGridPlotter
# -----------------------------------------------------------------
class ProjectedDustHeatingAnalyser(DustHeatingAnalysisComponent):
"""
This class...
"""
def __init__(self, config=None):
"""
The constructor ...
:param config:
:return:
"""
# Call the constructor of the base class
super(ProjectedDustHeatingAnalyser, self).__init__(config)
# -- Attributes --
# The wavelength grid used for the simulations
self.wavelength_grid = None
# The number of wavelengths
self.number_of_wavelengths = None
# The SEDs
self.total_sed = None
self.evolved_sed = None
self.unevolved_sed = None
# The datacubes
self.total_datacube = None
self.evolved_datacube = None
self.unevolved_datacube = None
# The flux fractions
self.fractions = None
self.fraction_maps = dict()
# The TIR maps
self.total_tir_map = None
self.evolved_tir_map = None
self.unevolved_tir_map = None
# -----------------------------------------------------------------
@classmethod
def from_arguments(cls, arguments):
"""
This function ...
:param arguments:
:return:
"""
# Create a new ProjectedDustHeatingAnalyser instance
analyser = cls()
# Set the modeling path
analyser.config.path = arguments.path
# Return the new instance
return analyser
# -----------------------------------------------------------------
def run(self):
"""
This function ...
:return:
"""
# 1. Call the setup function
self.setup()
# 2. Load the wavelength grid
self.load_wavelength_grid()
# 3. Load the simulated SEDs
self.load_seds()
# 4. Load the simulated data cubes
self.load_datacubes()
# 5. Calculate the heating fractions
self.calculate_heating_fractions()
# 6. Calculate maps of the TIR luminosity
self.calculate_tir_maps()
# 7. Writing
self.write()
# 8. Plotting
self.plot()
# -----------------------------------------------------------------
def setup(self):
"""
This function ...
:return:
"""
# Call the setup function of the base class
super(ProjectedDustHeatingAnalyser, self).setup()
# -----------------------------------------------------------------
def load_wavelength_grid(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the wavelength grid file produced by SKIRT ...")
# Determine the path to the wavelength grid file in heating/total
wavelengths_path = fs.join(self.output_paths["total"], self.galaxy_name + "_wavelengths.dat")
# Load the wavelength grid as a table
self.wavelength_grid = WavelengthGrid.from_skirt_output(wavelengths_path)
# Determine the number of wavelengths
self.number_of_wavelengths = len(self.wavelength_grid)
# -----------------------------------------------------------------
def load_seds(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the simulated SEDs ...")
# Load the SED of the simulation with the total stellar population
self.load_total_sed()
# Load the SED of the simulation with the evolved stellar population
self.load_evolved_sed()
# Load the SED of the simulation with the unevolved stellar populations
self.load_unevolved_sed()
# -----------------------------------------------------------------
def load_total_sed(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the SED of the simulation with the total stellar population ...")
# Determine the path to the SED file
path = fs.join(self.output_paths["total"], self.galaxy_name + "_earth_sed.dat")
# Load the SED
self.total_sed = SED.from_skirt(path)
# -----------------------------------------------------------------
def load_evolved_sed(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the SED of the simulation with the evolved stellar population ...")
# Determine the path to the SED file
path = fs.join(self.output_paths["old"], self.galaxy_name + "_earth_sed.dat")
# Load the SED
self.evolved_sed = SED.from_skirt(path)
# -----------------------------------------------------------------
def load_unevolved_sed(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the SED of the simulation with the unevolved stellar populations ...")
# Determine the path to the SED file
path = fs.join(self.output_paths["unevolved"], self.galaxy_name + "_earth_sed.dat")
# Load the SED
self.unevolved_sed = SED.from_skirt(path)
# -----------------------------------------------------------------
def load_datacubes(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the simulated datacubes ...")
# Load the total, evolved and unevolved datacubes
self.load_total_datacube()
self.load_evolved_datacube()
self.load_unevolved_datacube()
# -----------------------------------------------------------------
def load_total_datacube(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the datacube of the simulation with the total stellar population ...")
# Determine the path to the datacube
path = fs.join(self.output_paths["total"], self.galaxy_name + "_earth_total.fits")
# Load the datacube
self.total_datacube = Image.from_file(path)
# -----------------------------------------------------------------
def load_evolved_datacube(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the datacube of the simulation with the evolved stellar population ...")
# Determine the path to the datacube
path = fs.join(self.output_paths["evolved"], self.galaxy_name + "_earth_total.fits")
# Load the datacube
self.evolved_datacube = Image.from_file(path)
# -----------------------------------------------------------------
def load_unevolved_datacube(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the datacube of the simulation with the unevolved stellar populations ...")
# Determine the path to the datacube
path = fs.join(self.output_paths["unevolved"], self.galaxy_name + "_earth_total.fits")
# Load the datacube
self.unevolved_datacube = Image.from_file(path)
# -----------------------------------------------------------------
def calculate_heating_fractions(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Calculating the heating fractions ...")
# Calculate the flux fractions of the evolved and unevolved stellar populations over the wavelength spectrum
self.calculate_sed_heating_fractions()
# Calculate the flux fractions of the evolved and unevolved stellar population for selected wavelengths in each pixel
self.calculate_pixel_heating_fractions()
# -----------------------------------------------------------------
def calculate_sed_heating_fractions(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Calculating the flux fractions over the wavelength spectrum ...")
# Get the wavelength and flux arrays; dust emission wavelengths are taken to be those above 10 micron
all_wavelengths = self.wavelength_grid.wavelengths(asarray=True, unit="micron")
dust_wavelengths = all_wavelengths > 10.
total_fluxes = self.total_sed.fluxes(asarray=True)
evolved_fluxes = self.evolved_sed.fluxes(asarray=True)
unevolved_fluxes = self.unevolved_sed.fluxes(asarray=True)
# Calculate the heating fractions
unevolved_fractions = 0.5 * (unevolved_fluxes + total_fluxes - evolved_fluxes) / total_fluxes
evolved_fractions = 0.5 * (evolved_fluxes + total_fluxes - unevolved_fluxes) / total_fluxes
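# Note (added for clarity): 0.5*(u + t - e)/t is the mean of the two available
# estimates of the unevolved fraction, u/t and 1 - e/t (u, e, t being the
# unevolved, evolved and total fluxes); the two estimates coincide only when
# the unevolved and evolved contributions add up exactly to the total.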
# Create the table of flux fractions
names = ["Wavelength", "Flux fraction from unevolved stellar populations",
"Flux fraction from evolved stellar population"]
data = [all_wavelengths[dust_wavelengths], unevolved_fractions[dust_wavelengths],
evolved_fractions[dust_wavelengths]]
self.fractions = tables.new(data, names)
# -----------------------------------------------------------------
def calculate_pixel_heating_fractions(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Calculating the flux fractions for selected wavelengths in each pixel ...")
# The interesting wavelengths
wavelengths = [12., 24., 70., 100., 250., 350., 500.]
# Loop over the interesting wavelengths
for wavelength in wavelengths:
# Get the index of the wavelength grid point closest to this wavelength
index = self.wavelength_grid.closest_wavelength_index(wavelength)
wavelength = self.wavelength_grid.table["Wavelength"][index]
# Determine the name of the corresponding frame in the datacube image
frame_name = "frame" + str(index)
total_fluxes = self.total_datacube.frames[frame_name]
evolved_fluxes = self.evolved_datacube.frames[frame_name]
unevolved_fluxes = self.unevolved_datacube.frames[frame_name]
# Calculate the heating fractions
unevolved_fractions = 0.5 * (unevolved_fluxes + total_fluxes - evolved_fluxes) / total_fluxes
#evolved_fractions = 0.5 * (evolved_fluxes + total_fluxes - unevolved_fluxes) / total_fluxes
# Add the fraction map to the dictionary
self.fraction_maps[wavelength] = unevolved_fractions
# -----------------------------------------------------------------
def calculate_tir_maps(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Calculating the TIR luminosity in each pixel ...")
# Calculate the TIR maps
self.calculate_total_tir_map()
self.calculate_unevolved_tir_map()
self.calculate_evolved_tir_map()
# -----------------------------------------------------------------
def calculate_total_tir_map(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Calculating the total TIR luminosity in each pixel ...")
cube = self.total_datacube.asarray()
wavelengths = self.wavelength_grid.wavelengths(asarray=True, unit="micron")
deltas = self.wavelength_grid.deltas(asarray=True, unit="micron")
# Calculate the map
self.total_tir_map = integrate_pixel_seds(cube, wavelengths, deltas)
# -----------------------------------------------------------------
def calculate_unevolved_tir_map(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Calculating the unevolved TIR luminosity in each pixel ...")
cube = self.unevolved_datacube.asarray()
wavelengths = self.wavelength_grid.wavelengths(asarray=True, unit="micron")
deltas = self.wavelength_grid.deltas(asarray=True, unit="micron")
# Calculate the map
self.unevolved_tir_map = integrate_pixel_seds(cube, wavelengths, deltas)
# -----------------------------------------------------------------
def calculate_evolved_tir_map(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Calculating the evolved TIR luminosity in each pixel ...")
cube = self.evolved_datacube.asarray()
wavelengths = self.wavelength_grid.wavelengths(asarray=True, unit="micron")
deltas = self.wavelength_grid.deltas(asarray=True, unit="micron")
# Calculate the map
self.evolved_tir_map = integrate_pixel_seds(cube, wavelengths, deltas)
# -----------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing ...")
# Write the table with the flux fractions
self.write_fractions()
# Write the TIR maps
self.write_tir_maps()
# -----------------------------------------------------------------
def write_fractions(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing a table with the flux fractions at different wavelengths ...")
# Determine the path to the table of the flux fractions
path = fs.join(self.analysis_heating_path, "fractions.dat")
# Write the table
tables.write(self.fractions, path, format="ascii.ecsv")
# -----------------------------------------------------------------
def write_tir_maps(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the TIR maps ...")
# Determine the path to the total TIR map
path = fs.join(self.analysis_heating_path, "tir_total.fits")
# Write the total TIR map
self.total_tir_map.save(path)
# Determine the path to the unevolved TIR map
path = fs.join(self.analysis_heating_path, "tir_unevolved.fits")
# Write the unevolved TIR map
self.unevolved_tir_map.save(path)
# Determine the path to the evolved TIR map
path = fs.join(self.analysis_heating_path, "tir_evolved.fits")
# Write the evolved TIR map
self.evolved_tir_map.save(path)
# -----------------------------------------------------------------
def plot(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting ...")
# Plot the flux fraction as a function of wavelength
self.plot_fractions()
# Plot the maps of the flux fraction at specific wavelengths
self.plot_fraction_maps()
# -----------------------------------------------------------------
def plot_fractions(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the flux fractions as a function of wavelength ...")
# Determine the path to the plot file
path = fs.join(self.analysis_heating_path, "fractions.pdf")
# Create the figure
plt.figure(figsize=(7, 5))
plt.ylabel('$F^\prime_{\lambda,\mathrm{unev.}}$ [$\%$]', fontsize=20)
plt.xlabel('$\lambda/\mu\mathrm{m}$', fontsize=20)
plt.xlim(10., 1.e3)
plt.ylim(0., 60.)
plt.xscale('log')
plt.tick_params(labelsize=20)
# plt.subplots_adjust(bottom=0.1)
# plt.plot(dustwls,Fold, 'r-', label="old stars")
plt.plot(self.fractions["Wavelength"], self.fractions["Flux fraction from unevolved stellar populations"], 'k-', label="Unevolved stellar populations")
plt.plot(self.fractions["Wavelength"], self.fractions["Flux fraction from evolved stellar population"], "g-", label="Evolved stellar population")
# plt.plot(dustwls,Fyoung_alternative1, 'r-', label="alt 1")
# plt.plot(dustwls,Fyoung_alternative2, 'g-', label="alt 2")
# plt.plot(dustwls,Fyoung_alternative3, 'c-', label="alt 3")
#plt.plot(dustwls, Fyoung_alternative4, 'k-', label="alt 4")
#plt.fill_between(dustwls, Fyoung, Fyoung_alternative4, color='grey', alpha='0.5')
plt.tight_layout()
# plt.legend(loc='upper left',numpoints=1,markerscale=1.5,fontsize=14)
# Save the figure
plt.savefig(path)
plt.close()
# -----------------------------------------------------------------
def plot_fraction_maps(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting maps of the heating fraction by unevolved stars at specific wavelengths ...")
# Create the image grid plotter
plotter = StandardImageGridPlotter()
# -----------------------------------------------------------------
def integrate_pixel_seds(cube, wls, dwls):
"""
This function ...
:param cube:
:param wls:
:param dwls:
:return:
"""
Lsun = 3.846e26 # Watts
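# Reading of the conversion factor below (comment added for clarity; the pixel
# scale and distance are inferred from the numbers, not stated in the source):
# 1e6*1e-26 converts MJy to W/m^2/Hz, (36/206264.806247)**2 is the solid angle
# of a ~36 arcsec pixel in steradian, 4*pi*d^2 uses a distance d of ~0.785 Mpc
# in metres (0.785e6 pc * 3.086e16 m/pc), and 3e14/wls**2 = c/lambda^2 turns
# per-Hz into per-micron, giving Lsun per micron per pixel.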
MjySr_to_LsunMicron = 1.e6 * (36./206264.806247)**2 * 1.e-26 * 4*np.pi*(0.785e6*3.086e+16)**2 * 3.e14/(wls**2) / Lsun
xaxis = len(cube[0,0,0:])
yaxis = len(cube[0,0:,0])
zaxis = len(cube[0:,0,0])
slice = np.zeros((yaxis,xaxis))
for i in range(0,yaxis):
for j in range(0,xaxis):
sed = cube[0:,i,j]
slice[i,j] = np.sum(sed * MjySr_to_LsunMicron * dwls)
return slice
# -----------------------------------------------------------------
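# Illustrative check (added for clarity, not part of the original module): the
# per-pixel double loop in integrate_pixel_seds is equivalent to one weighted
# sum over the wavelength axis, e.g. via numpy.tensordot on synthetic data.
if __name__ == "__main__":
    _wls = np.linspace(10., 500., 5)             # wavelengths in micron
    _dwls = np.gradient(_wls)                    # wavelength bin widths in micron
    _cube = np.random.rand(len(_wls), 4, 3)      # synthetic datacube: (wavelength, y, x)
    _loop_map = integrate_pixel_seds(_cube, _wls, _dwls)
    _factor = 1.e6 * (36./206264.806247)**2 * 1.e-26 * 4*np.pi*(0.785e6*3.086e+16)**2 * 3.e14/(_wls**2) / 3.846e26
    _vec_map = np.tensordot(_factor * _dwls, _cube, axes=([0], [0]))
    assert np.allclose(_loop_map, _vec_map)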
|
mit
|
smartshark/labelSHARK
|
setup.py
|
1
|
1394
|
#!/usr/bin/env python
import sys
from setuptools import setup, find_packages
if not sys.version_info[0] == 3:
print('only python3 supported!')
sys.exit(1)
if not sys.version_info[1] > 5:
print('only python > 3.5 supported, got: {}.{}'.format(sys.version_info[0], sys.version_info[1]))
sys.exit(1)
setup(
name='labelSHARK',
version='2.2.1',
description='Commit labeling for smartSHARK.',
install_requires=['pandas', 'mongoengine', 'pymongo', 'pycoshark>=1.3.1', 'skift',
'fasttext @ https://github.com/facebookresearch/fastText/tarball/master#egg-fasttext-0.10.0',],
dependency_links=['https://github.com/facebookresearch/fastText/tarball/master#egg-fasttext-0.10.0'],
author='atrautsch',
author_email='[email protected]',
url='https://github.com/smartshark/labelSHARK',
download_url='https://github.com/smartshark/labelSHARK/zipball/master',
test_suite='labelSHARK.tests',
packages=find_packages(),
zip_safe=False,
classifiers=[
"Programming Language :: Python :: 3",
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache2.0 License",
"Operating System :: POSIX :: Linux",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
|
apache-2.0
|
huzq/scikit-learn
|
sklearn/utils/tests/test_fixes.py
|
6
|
3282
|
# Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import math
import numpy as np
import pytest
import scipy.stats
from sklearn.utils._testing import assert_array_equal
from sklearn.utils.fixes import _joblib_parallel_args
from sklearn.utils.fixes import _object_dtype_isnan
from sklearn.utils.fixes import loguniform
from sklearn.utils.fixes import MaskedArray
@pytest.mark.parametrize('joblib_version', ('0.11', '0.12.0'))
def test_joblib_parallel_args(monkeypatch, joblib_version):
import joblib
monkeypatch.setattr(joblib, '__version__', joblib_version)
if joblib_version == '0.12.0':
# arguments are simply passed through
assert _joblib_parallel_args(prefer='threads') == {'prefer': 'threads'}
assert _joblib_parallel_args(prefer='processes', require=None) == {
'prefer': 'processes', 'require': None}
assert _joblib_parallel_args(non_existing=1) == {'non_existing': 1}
elif joblib_version == '0.11':
# arguments are mapped to the corresponding backend
assert _joblib_parallel_args(prefer='threads') == {
'backend': 'threading'}
assert _joblib_parallel_args(prefer='processes') == {
'backend': 'multiprocessing'}
with pytest.raises(ValueError):
_joblib_parallel_args(prefer='invalid')
assert _joblib_parallel_args(
prefer='processes', require='sharedmem') == {
'backend': 'threading'}
with pytest.raises(ValueError):
_joblib_parallel_args(require='invalid')
with pytest.raises(NotImplementedError):
_joblib_parallel_args(verbose=True)
else:
raise ValueError
@pytest.mark.parametrize("dtype, val", ([object, 1],
[object, "a"],
[float, 1]))
def test_object_dtype_isnan(dtype, val):
X = np.array([[val, np.nan],
[np.nan, val]], dtype=dtype)
expected_mask = np.array([[False, True],
[True, False]])
mask = _object_dtype_isnan(X)
assert_array_equal(mask, expected_mask)
@pytest.mark.parametrize("low,high,base",
[(-1, 0, 10), (0, 2, np.exp(1)), (-1, 1, 2)])
def test_loguniform(low, high, base):
rv = loguniform(base ** low, base ** high)
assert isinstance(rv, scipy.stats._distn_infrastructure.rv_frozen)
rvs = rv.rvs(size=2000, random_state=0)
# Test the basics; right bounds, right size
assert (base ** low <= rvs).all() and (rvs <= base ** high).all()
assert len(rvs) == 2000
# Test that it's actually (fairly) uniform
log_rvs = np.array([math.log(x, base) for x in rvs])
counts, _ = np.histogram(log_rvs)
assert counts.mean() == 200
assert np.abs(counts - counts.mean()).max() <= 40
# Test that random_state works
assert (
loguniform(base ** low, base ** high).rvs(random_state=0)
== loguniform(base ** low, base ** high).rvs(random_state=0)
)
def test_masked_array_deprecated(): # TODO: remove in 0.25
with pytest.warns(FutureWarning, match='is deprecated'):
MaskedArray()
|
bsd-3-clause
|
rkmaddox/mne-python
|
examples/visualization/3d_to_2d.py
|
15
|
4941
|
"""
.. _ex-electrode-pos-2d:
====================================================
How to convert 3D electrode positions to a 2D image.
====================================================
Sometimes we want to convert a 3D representation of electrodes into a 2D
image. For example, if we are using electrocorticography it is common to
create scatterplots on top of a brain, with each point representing an
electrode.
In this example, we'll show two ways of doing this in MNE-Python. First,
if we have the 3D locations of each electrode then we can use Mayavi to
take a snapshot of a view of the brain. If we do not have these 3D locations,
and only have a 2D image of the electrodes on the brain, we can use the
:class:`mne.viz.ClickableImage` class to choose our own electrode positions
on the image.
"""
# Authors: Christopher Holdgraf <[email protected]>
#
# License: BSD (3-clause)
from scipy.io import loadmat
import numpy as np
from matplotlib import pyplot as plt
from os import path as op
import mne
from mne.viz import ClickableImage # noqa
from mne.viz import (plot_alignment, snapshot_brain_montage,
set_3d_view)
print(__doc__)
subjects_dir = mne.datasets.sample.data_path() + '/subjects'
path_data = mne.datasets.misc.data_path() + '/ecog/sample_ecog.mat'
# We've already clicked and exported
layout_path = op.join(op.dirname(mne.__file__), 'data', 'image')
layout_name = 'custom_layout.lout'
###############################################################################
# Load data
# ---------
#
# First we will load a sample ECoG dataset which we'll use for generating
# a 2D snapshot.
mat = loadmat(path_data)
ch_names = mat['ch_names'].tolist()
elec = mat['elec'] # electrode coordinates in meters
# Now we make a montage stating that the sEEG contacts are in head
# coordinate system (although they are in MRI). This is compensated
# by the fact that below we do not specify a trans file so the Head<->MRI
# transform is the identity.
montage = mne.channels.make_dig_montage(ch_pos=dict(zip(ch_names, elec)),
coord_frame='head')
info = mne.create_info(ch_names, 1000., 'ecog').set_montage(montage)
print('Created %s channel positions' % len(ch_names))
###############################################################################
# Project 3D electrodes to a 2D snapshot
# --------------------------------------
#
# Because we have the 3D location of each electrode, we can use the
# :func:`mne.viz.snapshot_brain_montage` function to return a 2D image along
# with the electrode positions on that image. We use this in conjunction with
# :func:`mne.viz.plot_alignment`, which visualizes electrode positions.
fig = plot_alignment(info, subject='sample', subjects_dir=subjects_dir,
surfaces=['pial'], meg=False)
set_3d_view(figure=fig, azimuth=200, elevation=70)
xy, im = snapshot_brain_montage(fig, montage)
# Convert from a dictionary to array to plot
xy_pts = np.vstack([xy[ch] for ch in info['ch_names']])
# Define an arbitrary "activity" pattern for viz
activity = np.linspace(100, 200, xy_pts.shape[0])
# This allows us to use matplotlib to create arbitrary 2d scatterplots
fig2, ax = plt.subplots(figsize=(10, 10))
ax.imshow(im)
ax.scatter(*xy_pts.T, c=activity, s=200, cmap='coolwarm')
ax.set_axis_off()
# fig2.savefig('./brain.png', bbox_inches='tight') # For ClickableImage
###############################################################################
# Manually creating 2D electrode positions
# ----------------------------------------
#
# If we don't have the 3D electrode positions then we can still create a
# 2D representation of the electrodes. Assuming that you can see the electrodes
# on the 2D image, we can use :class:`mne.viz.ClickableImage` to open the image
# interactively. You can click points on the image and the x/y coordinate will
# be stored.
#
# We'll open an image file, then use ClickableImage to
# return 2D locations of mouse clicks (or load a file already created).
# Then, we'll return these xy positions as a layout for use with plotting topo
# maps.
# This code opens the image so you can click on it. Commented out
# because we've stored the clicks as a layout file already.
# # The click coordinates are stored as a list of tuples
# im = plt.imread('./brain.png')
# click = ClickableImage(im)
# click.plot_clicks()
# # Generate a layout from our clicks and normalize by the image
# print('Generating and saving layout...')
# lt = click.to_layout()
# lt.save(op.join(layout_path, layout_name)) # To save if we want
# We've already got the layout, load it
lt = mne.channels.read_layout(layout_name, path=layout_path, scale=False)
x = lt.pos[:, 0] * float(im.shape[1])
y = (1 - lt.pos[:, 1]) * float(im.shape[0]) # Flip the y-position
fig, ax = plt.subplots()
ax.imshow(im)
ax.scatter(x, y, s=120, color='r')
plt.autoscale(tight=True)
ax.set_axis_off()
plt.show()
|
bsd-3-clause
|
pllim/astropy
|
astropy/visualization/wcsaxes/grid_paths.py
|
5
|
4066
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib.lines import Path
from astropy.coordinates.angle_utilities import angular_separation
# Tolerance for WCS round-tripping, relative to the scale size
ROUND_TRIP_RTOL = 1.
# Tolerance factor for detecting discontinuities between neighbouring steps
DISCONT_FACTOR = 10.
def get_lon_lat_path(lon_lat, pixel, lon_lat_check):
"""
Draw a curve, taking into account discontinuities.
Parameters
----------
lon_lat : ndarray
The longitude and latitude values along the curve, given as a (n,2)
array.
pixel : ndarray
The pixel coordinates corresponding to ``lon_lat``
lon_lat_check : ndarray
The world coordinates derived from converting from ``pixel``, which is
used to ensure round-tripping.
"""
# In some spherical projections, some parts of the curve are 'behind' or
# 'in front of' the plane of the image, so we find those by reversing the
# transformation and finding points where the result is not consistent.
sep = angular_separation(np.radians(lon_lat[:, 0]),
np.radians(lon_lat[:, 1]),
np.radians(lon_lat_check[:, 0]),
np.radians(lon_lat_check[:, 1]))
# Define the relevant scale size using the separation between the first two points
scale_size = angular_separation(*np.radians(lon_lat[0, :]), *np.radians(lon_lat[1, :]))
with np.errstate(invalid='ignore'):
sep[sep > np.pi] -= 2. * np.pi
mask = np.abs(sep > ROUND_TRIP_RTOL * scale_size)
# Mask values with invalid pixel positions
mask = mask | np.isnan(pixel[:, 0]) | np.isnan(pixel[:, 1])
# We can now start to set up the codes for the Path.
codes = np.zeros(lon_lat.shape[0], dtype=np.uint8)
codes[:] = Path.LINETO
codes[0] = Path.MOVETO
codes[mask] = Path.MOVETO
# Also need to move to point *after* a hidden value
codes[1:][mask[:-1]] = Path.MOVETO
# We now go through and search for discontinuities in the curve that would
# be due to the curve going outside the field of view, invalid WCS values,
# or due to discontinuities in the projection.
# We start off by pre-computing the step in pixel coordinates from one
# point to the next. The idea is to look for large jumps that might indicate
# discontinuities.
step = np.sqrt((pixel[1:, 0] - pixel[:-1, 0]) ** 2 +
(pixel[1:, 1] - pixel[:-1, 1]) ** 2)
# We search for discontinuities by looking for places where the step
# is larger by more than a given factor compared to the previous step
# discontinuous = step > DISCONT_FACTOR * np.median(step)
discontinuous = step[1:] > DISCONT_FACTOR * step[:-1]
# Skip over discontinuities
codes[2:][discontinuous] = Path.MOVETO
# The above missed the first step, so check that too
if step[0] > DISCONT_FACTOR * step[1]:
codes[1] = Path.MOVETO
# Create the path
path = Path(pixel, codes=codes)
return path
def get_gridline_path(world, pixel):
"""
Draw a grid line
Parameters
----------
world : ndarray
The longitude and latitude values along the curve, given as a (n,2)
array.
pixel : ndarray
The pixel coordinates corresponding to ``lon_lat``
"""
# Mask values with invalid pixel positions
mask = np.isnan(pixel[:, 0]) | np.isnan(pixel[:, 1])
# We can now start to set up the codes for the Path.
codes = np.zeros(world.shape[0], dtype=np.uint8)
codes[:] = Path.LINETO
codes[0] = Path.MOVETO
codes[mask] = Path.MOVETO
# Also need to move to point *after* a hidden value
codes[1:][mask[:-1]] = Path.MOVETO
# We now go through and search for discontinuities in the curve that would
# be due to the curve going outside the field of view, invalid WCS values,
# or due to discontinuities in the projection.
# Create the path
path = Path(pixel, codes=codes)
return path
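# Illustrative sketch (added for clarity, not part of the original module): an
# invalid (NaN) pixel position splits the drawn grid line into separate
# segments through the MOVETO codes assigned above.
if __name__ == '__main__':
    world = np.column_stack([np.linspace(0., 10., 5), np.zeros(5)])
    pixel = np.column_stack([np.arange(5.), np.arange(5.)])
    pixel[2] = np.nan  # pretend this vertex falls outside the projection
    path = get_gridline_path(world, pixel)
    # expected codes: MOVETO, LINETO, MOVETO (NaN vertex), MOVETO (vertex after it), LINETO
    print(path.codes)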
|
bsd-3-clause
|
stefansommer/jetflows
|
code/examples/simtest.py
|
1
|
6469
|
#!/usr/bin/python
#
# This file is part of jetflows.
#
# Copyright (C) 2014, Henry O. Jacobs ([email protected]), Stefan Sommer ([email protected])
# https://github.com/nefan/jetflows.git
#
# jetflows is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jetflows is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with jetflows. If not, see <http://www.gnu.org/licenses/>.
#
"""
Test convergence of match term for different orders
"""
import __builtin__
#__builtin__.__debug = True
import matching.imagesim as imsim
import two_jets as tj
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
from PIL import Image
import logging
logging.basicConfig(level=logging.DEBUG,format="[%(filename)s:%(lineno)s - %(funcName)6s() ] %(message)s")
def load_image( infilename ) :
img = Image.open( infilename )
img.load()
data = np.asarray( img, dtype="int32" )
return data
# create .npy files
#moving = '../data/Lenna-bw.png' # smooth approx 0.02
#moving_im = load_image(moving)
#moving = '../data/Lenna-bw.npy'
#moving_im = moving_im[250:300,300:350]
# notes: smoothing 0, border 0 for the below examples
moving = '../data/sine.npy'
(x,y) = np.mgrid[0:512,0:512]
import sympy as sp
sx = sp.Symbol('x')
sy = sp.Symbol('y')
moving_im = np.sin(2.*np.pi*(3.*x+0.*y)/512.)+(x/(512.))**2 # linspace 1,6,24
e = sp.sin(2*sp.pi*(3*sx+0*sy)/512)+(sx/(512))**2
baseline_sp = np.double(sp.integrate(e**2,(sx,0,512),(sy,0,512)).evalf()/512**2)
logging.info("baseline sp: %g", baseline_sp)
#f = lambda x: np.mod(x,2.*np.pi)/np.pi if np.mod(x,2.*np.pi) < np.pi else 2-np.mod(x,2.*np.pi)/np.pi
#moving_im = np.array([f(5.*2.*np.pi*x[i,j]/512.) for i in range(0,512) for j in range(0,512)]).reshape([512,512])
#moving_im = np.sin(((x+y)/(2.*512.))**2)
#moving_im = np.sin((x+y)/(2.*512.))
#moving_im = ((x+y)/(2.*512.))**2 # linspace 0,5,18, for paper
#moving_im = (x/(512.))**2
#moving_im = (x+y)/(2.*512.) # linspace 0,5,18, for paper
#moving_im = x/(512.)
#moving_im = .7*np.ones(x.shape)
np.save(moving,moving_im)
fixed_im = np.zeros(moving_im.shape)
fixed = '../data/zero.npy'
np.save(fixed,fixed_im)
#plt.figure(22)
#plt.imshow(moving_im)
#plt.set_cmap('gray')
# match options
visualize = True
border = 0
smoothing = 0.0
SIGMAF = 2.**(-1)
splineS = 1e2
logging.info("image dimensions: %s", moving_im.shape)
#hk = np.linspace(0,5,18) # for linear and quadratic plots
hk = np.linspace(1,6,24) # np.array([2,3]) for image with points
hs = np.zeros(hk.shape)
ppas = np.zeros(hk.shape)
print "hk: " + str(hk)
res = np.zeros(np.shape(hk))
colors = ['b','r','g']
logfig = plt.figure(21)
ax1 = logfig.add_subplot(111)
ax2 = ax1.twinx()
# get baseline, order 0
# nr points
pointsPerAxis = moving_im.shape[0]
h = 1. / pointsPerAxis
# get similarity
sim = imsim.get(pointsPerAxis, immname=moving, imfname=fixed, immT=None, border=border, normalize=False, visualize=visualize, order=2, smoothscaleFactor=pointsPerAxis*smoothing, SIGMAF=SIGMAF, h=h, splineS=splineS)
DIM = tj.DIM = sim['DIM']
N = tj.N = sim['N']
logging.info("N points: %d", N)
# state
p = np.zeros([N,DIM])
mu_1 = np.zeros([N,DIM,DIM])
mu_2 = np.zeros([N,DIM,DIM,DIM])
(q,q_1,q_2) = (sim['initial'][0], np.outer(np.ones(N),np.eye(DIM)).reshape([N,DIM,DIM]), np.zeros([N,DIM,DIM,DIM]))
state = tj.weinstein_darboux_to_state(q, q_1, q_2, p, mu_1, mu_2)# initial guess + zeros
# runsim
baseline = sim['f'](state, visualize=False)[0]
logging.info("baseline sim: %g", baseline)
#baseline = 0.
for order in [0,2]:
for i in range(len(hk)):
# nr points
h = 1. / 2**hk[i]
pointsPerAxis = np.ceil(1. / h)
h = 1. / pointsPerAxis
hs[i] = h
ppas[i] = pointsPerAxis
# get similarity
sim = imsim.get(pointsPerAxis, immname=moving, imfname=fixed, immT=None, border=border, normalize=False, visualize=visualize, order=order, smoothscaleFactor=pointsPerAxis*smoothing, SIGMAF=SIGMAF, h=h, splineS=splineS)
DIM = tj.DIM = sim['DIM']
N = tj.N = sim['N']
logging.info("N points: %d", N)
# state
p = np.zeros([N,DIM])
mu_1 = np.zeros([N,DIM,DIM])
mu_2 = np.zeros([N,DIM,DIM,DIM])
(q,q_1,q_2) = (sim['initial'][0], np.outer(np.ones(N),np.eye(DIM)).reshape([N,DIM,DIM]), np.zeros([N,DIM,DIM,DIM]))
state = tj.weinstein_darboux_to_state(q, q_1, q_2, p, mu_1, mu_2)# initial guess + zeros
# runsim
val = sim['f'](state, visualize=False)[0]
logging.info("sim: %g", val)
# save result
res[i] = val
logging.info("results, order %d: %s", order, res[i])
logx = -np.log(hs[:-1])
logy = np.log(np.abs(baseline-res[:-1]))
convrate = -np.diff(logy)/(np.diff(logx)+1e-8)
logging.info("convergence rates, order %d: %s", order, convrate)
logging.info("mean convergence rates, order %d: %s", order, np.mean(convrate[np.isfinite(convrate)]))
# plot
plt.figure(20)
plt.plot(hk,res,colors[order],label='order '+str(order))
plt.figure(21)
ax1.plot(logx,logy,colors[order],label='order '+str(order))
ax2.plot(logx[1:],convrate,colors[order]+'--',label='order '+str(order))
ax2.set_ylim(-7,7)
plt.xlim(-np.log(hs[1]),-np.log(hs[-1]))
# ref
L2ref = 1./np.prod(moving_im.shape)*np.sum( moving_im**2 )
#plt.plot(np.arange(len(res)),np.repeat(L2ref,res.shape),'j',label='pixelwise')
logging.info("L2 ref (non-smoothed): %g", L2ref )
# plots
plt.figure(20)
plt.title('Match Term Approximation')
plt.xlabel('$j=1,\ldots,6$ ($h=\mathrm{ceil}(2^{-j})$)')
#plt.xlabel('$j=0,\ldots,5$ ($h=\mathrm{ceil}(2^{-j})$)')
plt.ylabel('measured $L^2$ dissimilarity')
#plt.ylim(0,2)
plt.legend()
plt.gcf().savefig('../results/simtest.eps')
# plots, log
plt.figure(21)
plt.title('Match Term Approximation, Log-log-scale')
ax1.set_xlabel('$-\mathrm{log}(h)$')
ax1.set_ylabel('$\mathrm{log}-L^2$ dissimilarity error')
ax2.set_ylabel('convergence rate (neg. slope of log-error)')
ax1.legend()
plt.show(block=True)
plt.gcf().savefig('../results/simtest-log.eps')
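# Worked example of the convergence-rate estimate above (added for clarity):
# if the error behaves like C*h^p, then log|err| = log(C) + p*log(h), so the
# slope of log-error against -log(h) is -p. For instance, errors of
# (1e-2, 2.5e-3, 6.25e-4) at h = (1/2, 1/4, 1/8) give
# convrate = -np.diff(np.log(errs))/np.diff(-np.log(hs)) = [2., 2.],
# i.e. second-order convergence.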
|
agpl-3.0
|
rajat1994/scikit-learn
|
sklearn/linear_model/tests/test_omp.py
|
272
|
7752
|
# Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
|
bsd-3-clause
|
sunzhxjs/JobGIS
|
lib/python2.7/site-packages/pandas/tseries/offsets.py
|
9
|
87012
|
from datetime import date, datetime, timedelta
from pandas.compat import range
from pandas import compat
import numpy as np
from pandas.tseries.tools import to_datetime
from pandas.tseries.timedeltas import to_timedelta
from pandas.core.common import ABCSeries, ABCDatetimeIndex
# import after tools, dateutil check
from dateutil.relativedelta import relativedelta, weekday
from dateutil.easter import easter
import pandas.tslib as tslib
from pandas.tslib import Timestamp, OutOfBoundsDatetime, Timedelta
import functools
__all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay',
'CBMonthEnd','CBMonthBegin',
'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd',
'BusinessHour',
'YearBegin', 'BYearBegin', 'YearEnd', 'BYearEnd',
'QuarterBegin', 'BQuarterBegin', 'QuarterEnd', 'BQuarterEnd',
'LastWeekOfMonth', 'FY5253Quarter', 'FY5253',
'Week', 'WeekOfMonth', 'Easter',
'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano',
'DateOffset']
# convert to/from datetime/timestamp to allow invalid Timestamp ranges to pass thru
def as_timestamp(obj):
if isinstance(obj, Timestamp):
return obj
try:
return Timestamp(obj)
except (OutOfBoundsDatetime):
pass
return obj
def as_datetime(obj):
f = getattr(obj,'to_pydatetime',None)
if f is not None:
obj = f()
return obj
def apply_wraps(func):
@functools.wraps(func)
def wrapper(self, other):
if other is tslib.NaT:
return tslib.NaT
elif isinstance(other, (timedelta, Tick, DateOffset)):
# timedelta path
return func(self, other)
elif isinstance(other, (np.datetime64, datetime, date)):
other = as_timestamp(other)
tz = getattr(other, 'tzinfo', None)
nano = getattr(other, 'nanosecond', 0)
try:
if self._adjust_dst and isinstance(other, Timestamp):
other = other.tz_localize(None)
result = func(self, other)
if self._adjust_dst:
result = tslib._localize_pydatetime(result, tz)
result = Timestamp(result)
if self.normalize:
result = result.normalize()
# nanosecond may be deleted depending on offset process
if not self.normalize and nano != 0:
if not isinstance(self, Nano) and result.nanosecond != nano:
if result.tz is not None:
# convert to UTC
value = tslib.tz_convert_single(result.value, 'UTC', result.tz)
else:
value = result.value
result = Timestamp(value + nano)
if tz is not None and result.tzinfo is None:
result = tslib._localize_pydatetime(result, tz)
except OutOfBoundsDatetime:
result = func(self, as_datetime(other))
if self.normalize:
# normalize_date returns normal datetime
result = normalize_date(result)
if tz is not None and result.tzinfo is None:
result = tslib._localize_pydatetime(result, tz)
return result
return wrapper
def apply_index_wraps(func):
@functools.wraps(func)
def wrapper(self, other):
result = func(self, other)
if self.normalize:
result = result.to_period('D').to_timestamp()
return result
return wrapper
def _is_normalized(dt):
if (dt.hour != 0 or dt.minute != 0 or dt.second != 0
or dt.microsecond != 0 or getattr(dt, 'nanosecond', 0) != 0):
return False
return True
#----------------------------------------------------------------------
# DateOffset
class ApplyTypeError(TypeError):
# sentinel class for catching the apply error to return NotImplemented
pass
class CacheableOffset(object):
_cacheable = True
class DateOffset(object):
"""
Standard kind of date increment used for a date range.
Works exactly like relativedelta in terms of the keyword args you
pass in, use of the keyword n is discouraged-- you would be better
off specifying n in the keywords you use, but regardless it is
there for you. n is needed for DateOffset subclasses.
DateOffsets work as follows. Each offset specifies a set of dates
that conform to the DateOffset. For example, BDay defines this
set to be the set of dates that are weekdays (M-F). To test if a
date is in the set of a DateOffset dateOffset we can use the
onOffset method: dateOffset.onOffset(date).
If a date is not on a valid date, the rollback and rollforward
methods can be used to roll the date to the nearest valid date
before/after the date.
DateOffsets can be created to move dates forward a given number of
valid dates. For example, Bday(2) can be added to a date to move
it two business days forward. If the date does not start on a
valid date, first it is moved to a valid date. Thus the pseudo code
is:
def __add__(date):
date = rollback(date) # does nothing if date is valid
return date + <n number of periods>
When a date offset is created for a negative number of periods,
the date is first rolled forward. The pseudo code is:
def __add__(date):
date = rollforward(date) # does nothing if date is valid
return date + <n number of periods>
Zero presents a problem. Should it roll forward or back? We
arbitrarily have it rollforward:
date + BDay(0) == BDay.rollforward(date)
Since 0 is a bit weird, we suggest avoiding its use.
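
Illustrative usage (a sketch added for clarity; BDay is the BusinessDay
alias defined further down in this module):

    from datetime import datetime
    d = datetime(2008, 8, 18)                  # a Monday
    d + BDay(2)                                # -> Wednesday 2008-08-20
    BDay().rollforward(datetime(2008, 8, 16))  # Saturday rolls to Monday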
"""
_cacheable = False
_normalize_cache = True
_kwds_use_relativedelta = (
'years', 'months', 'weeks', 'days',
'year', 'month', 'week', 'day', 'weekday',
'hour', 'minute', 'second', 'microsecond'
)
_use_relativedelta = False
_adjust_dst = False
# default for prior pickles
normalize = False
def __init__(self, n=1, normalize=False, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self._offset, self._use_relativedelta = self._determine_offset()
def _determine_offset(self):
# timedelta is used for plural sub-daily offsets (hours, minutes, seconds, microseconds)
# relativedelta is used for singular keywords and for plural offsets of daily length or more
# nanosecond(s) are handled by apply_wraps
kwds_no_nanos = dict(
(k, v) for k, v in self.kwds.items()
if k not in ('nanosecond', 'nanoseconds')
)
use_relativedelta = False
if len(kwds_no_nanos) > 0:
if any(k in self._kwds_use_relativedelta for k in kwds_no_nanos):
use_relativedelta = True
offset = relativedelta(**kwds_no_nanos)
else:
# sub-daily offset - use timedelta (tz-aware)
offset = timedelta(**kwds_no_nanos)
else:
offset = timedelta(1)
return offset, use_relativedelta
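# Examples of the resolution above (comment added for clarity):
# DateOffset(months=2) resolves to relativedelta(months=+2) with
# use_relativedelta=True, DateOffset(hours=3) resolves to timedelta(hours=3)
# with use_relativedelta=False, and a plain DateOffset(n) falls back to
# timedelta(1).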
@apply_wraps
def apply(self, other):
if self._use_relativedelta:
other = as_datetime(other)
if len(self.kwds) > 0:
tzinfo = getattr(other, 'tzinfo', None)
if tzinfo is not None and self._use_relativedelta:
# perform calculation in UTC
other = other.replace(tzinfo=None)
if self.n > 0:
for i in range(self.n):
other = other + self._offset
else:
for i in range(-self.n):
other = other - self._offset
if tzinfo is not None and self._use_relativedelta:
# bring tz back from UTC calculation
other = tslib._localize_pydatetime(other, tzinfo)
return as_timestamp(other)
else:
return other + timedelta(self.n)
@apply_index_wraps
def apply_index(self, i):
"""
Vectorized apply of DateOffset to DatetimeIndex,
raises NotImplementedError for offsets without a
vectorized implementation
.. versionadded:: 0.17.0
Parameters
----------
i : DatetimeIndex
Returns
-------
y : DatetimeIndex
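
Notes
-----
Illustrative sketch (added for clarity; assumes pandas imported as ``pd``)::

    idx = pd.date_range('2015-01-01', periods=3, freq='D')
    DateOffset(months=1).apply_index(idx)  # every element shifted one month forward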
"""
if not type(self) is DateOffset:
raise NotImplementedError("DateOffset subclass %s "
"does not have a vectorized "
"implementation"
% (self.__class__.__name__,))
relativedelta_fast = set(['years', 'months', 'weeks',
'days', 'hours', 'minutes',
'seconds', 'microseconds'])
# relativedelta/_offset path only valid for base DateOffset
if (self._use_relativedelta and
set(self.kwds).issubset(relativedelta_fast)):
months = ((self.kwds.get('years', 0) * 12
+ self.kwds.get('months', 0)) * self.n)
if months:
shifted = tslib.shift_months(i.asi8, months)
i = i._shallow_copy(shifted)
weeks = (self.kwds.get('weeks', 0)) * self.n
if weeks:
i = (i.to_period('W') + weeks).to_timestamp() + i.to_perioddelta('W')
timedelta_kwds = dict((k,v) for k,v in self.kwds.items()
if k in ['days','hours','minutes',
'seconds','microseconds'])
if timedelta_kwds:
delta = Timedelta(**timedelta_kwds)
i = i + (self.n * delta)
return i
elif not self._use_relativedelta and hasattr(self, '_offset'):
# timedelta
return i + (self._offset * self.n)
else:
# relativedelta with other keywords
raise NotImplementedError("DateOffset with relativedelta "
"keyword(s) %s not able to be "
"applied vectorized" %
(set(self.kwds) - relativedelta_fast),)
def isAnchored(self):
return (self.n == 1)
def copy(self):
return self.__class__(self.n, normalize=self.normalize, **self.kwds)
def _should_cache(self):
return self.isAnchored() and self._cacheable
def _params(self):
all_paras = dict(list(vars(self).items()) + list(self.kwds.items()))
if 'holidays' in all_paras and not all_paras['holidays']:
all_paras.pop('holidays')
exclude = ['kwds', 'name','normalize', 'calendar']
attrs = [(k, v) for k, v in all_paras.items() if (k not in exclude ) and (k[0] != '_')]
attrs = sorted(set(attrs))
params = tuple([str(self.__class__)] + attrs)
return params
def __repr__(self):
if hasattr(self, '_named'):
return self._named
className = getattr(self, '_outputName', type(self).__name__)
exclude = set(['n', 'inc', 'normalize'])
attrs = []
for attr in sorted(self.__dict__):
if ((attr == 'kwds' and len(self.kwds) == 0)
or attr.startswith('_')):
continue
elif attr == 'kwds':
kwds_new = {}
for key in self.kwds:
if not hasattr(self, key):
kwds_new[key] = self.kwds[key]
if len(kwds_new) > 0:
attrs.append('='.join((attr, repr(kwds_new))))
else:
if attr not in exclude:
attrs.append('='.join((attr, repr(getattr(self, attr)))))
if abs(self.n) != 1:
plural = 's'
else:
plural = ''
n_str = ""
if self.n != 1:
n_str = "%s * " % self.n
out = '<%s' % n_str + className + plural
if attrs:
out += ': ' + ', '.join(attrs)
out += '>'
return out
@property
def name(self):
if hasattr(self, '_named'):
return self._named
else:
return self.rule_code
def __eq__(self, other):
if other is None:
return False
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
other = to_offset(other)
if not isinstance(other, DateOffset):
return False
return self._params() == other._params()
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._params())
def __call__(self, other):
return self.apply(other)
def __add__(self, other):
if isinstance(other, (ABCDatetimeIndex, ABCSeries)):
return other + self
try:
return self.apply(other)
except ApplyTypeError:
return NotImplemented
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
if isinstance(other, datetime):
raise TypeError('Cannot subtract datetime from offset.')
elif type(other) == type(self):
return self.__class__(self.n - other.n, normalize=self.normalize, **self.kwds)
else: # pragma: no cover
return NotImplemented
def __rsub__(self, other):
if isinstance(other, (ABCDatetimeIndex, ABCSeries)):
return other - self
return self.__class__(-self.n, normalize=self.normalize, **self.kwds) + other
def __mul__(self, someInt):
return self.__class__(n=someInt * self.n, normalize=self.normalize, **self.kwds)
def __rmul__(self, someInt):
return self.__mul__(someInt)
def __neg__(self):
return self.__class__(-self.n, normalize=self.normalize, **self.kwds)
def rollback(self, dt):
"""Roll provided date backward to next offset only if not on offset"""
dt = as_timestamp(dt)
if not self.onOffset(dt):
dt = dt - self.__class__(1, normalize=self.normalize, **self.kwds)
return dt
def rollforward(self, dt):
"""Roll provided date forward to next offset only if not on offset"""
dt = as_timestamp(dt)
if not self.onOffset(dt):
dt = dt + self.__class__(1, normalize=self.normalize, **self.kwds)
return dt
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
# XXX, see #1395
if type(self) == DateOffset or isinstance(self, Tick):
return True
# Default (slow) method for determining if some date is a member of the
# date range generated by this offset. Subclasses may have this
# re-implemented in a nicer way.
a = dt
b = ((dt + self) - self)
return a == b
# helpers for vectorized offsets
def _beg_apply_index(self, i, freq):
"""Offsets index to beginning of Period frequency"""
off = i.to_perioddelta('D')
from pandas.tseries.frequencies import get_freq_code
base, mult = get_freq_code(freq)
base_period = i.to_period(base)
if self.n < 0:
# when subtracting, dates on start roll to prior
roll = np.where(base_period.to_timestamp() == i - off,
self.n, self.n + 1)
else:
roll = self.n
base = (base_period + roll).to_timestamp()
return base + off
def _end_apply_index(self, i, freq):
"""Offsets index to end of Period frequency"""
off = i.to_perioddelta('D')
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import get_freq_code
base, mult = get_freq_code(freq)
base_period = i.to_period(base)
if self.n > 0:
# when adding, dates on end roll to next
roll = np.where(base_period.to_timestamp(how='end') == i - off,
self.n, self.n - 1)
else:
roll = self.n
base = (base_period + roll).to_timestamp(how='end')
return base + off
# way to get around weirdness with rule_code
@property
def _prefix(self):
raise NotImplementedError('Prefix not defined')
@property
def rule_code(self):
return self._prefix
@property
def freqstr(self):
try:
code = self.rule_code
except NotImplementedError:
return repr(self)
if self.n != 1:
fstr = '%d%s' % (self.n, code)
else:
fstr = code
return fstr
class SingleConstructorOffset(DateOffset):
@classmethod
def _from_name(cls, suffix=None):
# default _from_name calls cls with no args
if suffix:
raise ValueError("Bad freq suffix %s" % suffix)
return cls()
class BusinessMixin(object):
""" mixin to business types to provide related functions """
# TODO: Combine this with DateOffset by defining a whitelisted set of
# attributes on each object rather than the existing behavior of iterating
# over internal ``__dict__``
def __repr__(self):
if hasattr(self, '_named'):
return self._named
className = getattr(self, '_outputName', self.__class__.__name__)
if abs(self.n) != 1:
plural = 's'
else:
plural = ''
n_str = ""
if self.n != 1:
n_str = "%s * " % self.n
out = '<%s' % n_str + className + plural + self._repr_attrs() + '>'
return out
def _repr_attrs(self):
if self.offset:
attrs = ['offset=%s' % repr(self.offset)]
else:
attrs = None
out = ''
if attrs:
out += ': ' + ', '.join(attrs)
return out
class BusinessDay(BusinessMixin, SingleConstructorOffset):
"""
DateOffset subclass representing possibly n business days
"""
_prefix = 'B'
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
@property
def freqstr(self):
try:
code = self.rule_code
except NotImplementedError:
return repr(self)
if self.n != 1:
fstr = '%d%s' % (self.n, code)
else:
fstr = code
if self.offset:
fstr += self._offset_str()
return fstr
def _offset_str(self):
def get_str(td):
off_str = ''
if td.days > 0:
off_str += str(td.days) + 'D'
if td.seconds > 0:
s = td.seconds
hrs = int(s / 3600)
if hrs != 0:
off_str += str(hrs) + 'H'
s -= hrs * 3600
mts = int(s / 60)
if mts != 0:
off_str += str(mts) + 'Min'
s -= mts * 60
if s != 0:
off_str += str(s) + 's'
if td.microseconds > 0:
off_str += str(td.microseconds) + 'us'
return off_str
if isinstance(self.offset, timedelta):
zero = timedelta(0, 0, 0)
if self.offset >= zero:
off_str = '+' + get_str(self.offset)
else:
off_str = '-' + get_str(-self.offset)
return off_str
else:
return '+' + repr(self.offset)
def isAnchored(self):
return (self.n == 1)
@apply_wraps
def apply(self, other):
if isinstance(other, datetime):
n = self.n
if n == 0 and other.weekday() > 4:
n = 1
result = other
# avoid slowness below
if abs(n) > 5:
k = n // 5
result = result + timedelta(7 * k)
if n < 0 and result.weekday() > 4:
n += 1
n -= 5 * k
if n == 0 and result.weekday() > 4:
n -= 1
while n != 0:
k = n // abs(n)
result = result + timedelta(k)
if result.weekday() < 5:
n -= k
if self.offset:
result = result + self.offset
return result
elif isinstance(other, (timedelta, Tick)):
return BDay(self.n, offset=self.offset + other,
normalize=self.normalize)
else:
raise ApplyTypeError('Only know how to combine business day with '
'datetime or timedelta.')
@apply_index_wraps
def apply_index(self, i):
time = i.to_perioddelta('D')
# to_period rolls forward to next BDay; track and
# reduce n where it does when rolling forward
shifted = (i.to_perioddelta('B') - time).asi8 != 0
if self.n > 0:
roll = np.where(shifted, self.n - 1, self.n)
else:
roll = self.n
return (i.to_period('B') + roll).to_timestamp() + time
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.weekday() < 5
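# Sketch: BusinessDay arithmetic skips weekends, as implemented in apply() above
# (assumes pandas is importable; dates chosen purely for illustration).
import pandas as pd
from pandas.tseries.offsets import BDay

_friday = pd.Timestamp('2014-08-01')   # a Friday
_friday + BDay(1)                      # Timestamp('2014-08-04'), the next Monday
_friday + 2 * BDay()                   # Timestamp('2014-08-05')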
class BusinessHour(BusinessMixin, SingleConstructorOffset):
"""
DateOffset subclass representing possibly n business hours
.. versionadded: 0.16.1
"""
_prefix = 'BH'
_anchor = 0
def __init__(self, n=1, normalize=False, **kwds):
self.n = int(n)
self.normalize = normalize
# must be validated here for equality check
kwds['start'] = self._validate_time(kwds.get('start', '09:00'))
kwds['end'] = self._validate_time(kwds.get('end', '17:00'))
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
self.start = kwds.get('start', '09:00')
self.end = kwds.get('end', '17:00')
# used for moving to next businessday
if self.n >= 0:
self.next_bday = BusinessDay(n=1)
else:
self.next_bday = BusinessDay(n=-1)
def _validate_time(self, t_input):
from datetime import time as dt_time
import time
if isinstance(t_input, compat.string_types):
try:
t = time.strptime(t_input, '%H:%M')
return dt_time(hour=t.tm_hour, minute=t.tm_min)
except ValueError:
raise ValueError("time data must match '%H:%M' format")
elif isinstance(t_input, dt_time):
if t_input.second != 0 or t_input.microsecond != 0:
raise ValueError("time data must be specified only with hour and minute")
return t_input
else:
raise ValueError("time data must be string or datetime.time")
def _get_daytime_flag(self):
if self.start == self.end:
raise ValueError('start and end must not be the same')
elif self.start < self.end:
return True
else:
return False
def _repr_attrs(self):
out = super(BusinessHour, self)._repr_attrs()
attrs = ['BH=%s-%s' % (self.start.strftime('%H:%M'),
self.end.strftime('%H:%M'))]
out += ': ' + ', '.join(attrs)
return out
def _next_opening_time(self, other):
"""
If n is positive, return tomorrow's business day opening time.
Otherwise, return yesterday's business day's opening time.
Opening time always falls on a BusinessDay; closing time may not,
if business hours extend over midnight.
"""
if not self.next_bday.onOffset(other):
other = other + self.next_bday
else:
if self.n >= 0 and self.start < other.time():
other = other + self.next_bday
elif self.n < 0 and other.time() < self.start:
other = other + self.next_bday
return datetime(other.year, other.month, other.day,
self.start.hour, self.start.minute)
def _prev_opening_time(self, other):
"""
If n is positive, return yesterday's business day opening time.
Otherwise, return yesterday's business day's opening time.
"""
if not self.next_bday.onOffset(other):
other = other - self.next_bday
else:
if self.n >= 0 and other.time() < self.start:
other = other - self.next_bday
elif self.n < 0 and other.time() > self.start:
other = other - self.next_bday
return datetime(other.year, other.month, other.day,
self.start.hour, self.start.minute)
def _get_business_hours_by_sec(self):
"""
Return business hours in a day by seconds.
"""
if self._get_daytime_flag():
# create dummy datetime to calculate business hours in a day
dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
until = datetime(2014, 4, 1, self.end.hour, self.end.minute)
return tslib.tot_seconds(until - dtstart)
else:
self.daytime = False
dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
until = datetime(2014, 4, 2, self.end.hour, self.end.minute)
return tslib.tot_seconds(until - dtstart)
@apply_wraps
def rollback(self, dt):
"""Roll provided date backward to next offset only if not on offset"""
if not self.onOffset(dt):
businesshours = self._get_business_hours_by_sec()
if self.n >= 0:
dt = self._prev_opening_time(dt) + timedelta(seconds=businesshours)
else:
dt = self._next_opening_time(dt) + timedelta(seconds=businesshours)
return dt
@apply_wraps
def rollforward(self, dt):
"""Roll provided date forward to next offset only if not on offset"""
if not self.onOffset(dt):
if self.n >= 0:
return self._next_opening_time(dt)
else:
return self._prev_opening_time(dt)
return dt
@apply_wraps
def apply(self, other):
# calculate here because offset is not immutable
daytime = self._get_daytime_flag()
businesshours = self._get_business_hours_by_sec()
bhdelta = timedelta(seconds=businesshours)
if isinstance(other, datetime):
# used for detecting edge condition
nanosecond = getattr(other, 'nanosecond', 0)
# reset timezone and nanosecond
# other may be a Timestamp, thus not use replace
other = datetime(other.year, other.month, other.day,
other.hour, other.minute,
other.second, other.microsecond)
n = self.n
if n >= 0:
if (other.time() == self.end or
not self._onOffset(other, businesshours)):
other = self._next_opening_time(other)
else:
if other.time() == self.start:
# adjustment to move to previous business day
other = other - timedelta(seconds=1)
if not self._onOffset(other, businesshours):
other = self._next_opening_time(other)
other = other + bhdelta
bd, r = divmod(abs(n * 60), businesshours // 60)
if n < 0:
bd, r = -bd, -r
if bd != 0:
skip_bd = BusinessDay(n=bd)
# midnight business hour may not be on BusinessDay
if not self.next_bday.onOffset(other):
remain = other - self._prev_opening_time(other)
other = self._next_opening_time(other + skip_bd) + remain
else:
other = other + skip_bd
hours, minutes = divmod(r, 60)
result = other + timedelta(hours=hours, minutes=minutes)
# because of previous adjustment, time will be larger than start
if ((daytime and (result.time() < self.start or self.end < result.time())) or
not daytime and (self.end < result.time() < self.start)):
if n >= 0:
bday_edge = self._prev_opening_time(other)
bday_edge = bday_edge + bhdelta
# calculate remainder
bday_remain = result - bday_edge
result = self._next_opening_time(other)
result += bday_remain
else:
bday_edge = self._next_opening_time(other)
bday_remain = result - bday_edge
result = self._next_opening_time(result) + bhdelta
result += bday_remain
# edge handling
if n >= 0:
if result.time() == self.end:
result = self._next_opening_time(result)
else:
if result.time() == self.start and nanosecond == 0:
# adjustment to move to previous business day
result = self._next_opening_time(result - timedelta(seconds=1)) + bhdelta
return result
else:
raise ApplyTypeError('Only know how to combine business hour with datetime.')
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
if dt.tzinfo is not None:
dt = datetime(dt.year, dt.month, dt.day, dt.hour,
dt.minute, dt.second, dt.microsecond)
# Valid BH can be on the different BusinessDay during midnight
# Distinguish by the time spent from previous opening time
businesshours = self._get_business_hours_by_sec()
return self._onOffset(dt, businesshours)
def _onOffset(self, dt, businesshours):
"""
Slight speedup using calculated values
"""
# if self.normalize and not _is_normalized(dt):
# return False
# Valid BH can be on the different BusinessDay during midnight
# Distinguish by the time spent from previous opening time
if self.n >= 0:
op = self._prev_opening_time(dt)
else:
op = self._next_opening_time(dt)
span = tslib.tot_seconds(dt - op)
if span <= businesshours:
return True
else:
return False
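# Sketch: BusinessHour with the default 09:00-17:00 window; time that overflows
# the closing hour carries into the next business day's opening (see apply() above).
import pandas as pd
from pandas.tseries.offsets import BusinessHour

_bh = BusinessHour(start='09:00', end='17:00')
pd.Timestamp('2014-08-01 16:30') + _bh   # Timestamp('2014-08-04 09:30'), Monday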
class CustomBusinessDay(BusinessDay):
"""
**EXPERIMENTAL** DateOffset subclass representing possibly n business days
excluding holidays
.. warning:: EXPERIMENTAL
This class is not officially supported and the API is likely to change
in future versions. Use this at your own risk.
Parameters
----------
n : int, default 1
offset : timedelta, default timedelta(0)
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
"""
_cacheable = False
_prefix = 'C'
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
calendar, holidays = self.get_calendar(weekmask=weekmask,
holidays=holidays,
calendar=calendar)
# CustomBusinessDay instances are identified by the
# following two attributes. See DateOffset._params()
# holidays, weekmask
self.kwds['weekmask'] = self.weekmask = weekmask
self.kwds['holidays'] = self.holidays = holidays
self.kwds['calendar'] = self.calendar = calendar
def get_calendar(self, weekmask, holidays, calendar):
'''Generate busdaycalendar'''
if isinstance(calendar, np.busdaycalendar):
if not holidays:
holidays = tuple(calendar.holidays)
elif not isinstance(holidays, tuple):
holidays = tuple(holidays)
else:
# trust that calendar.holidays and holidays are
# consistent
pass
return calendar, holidays
if holidays is None:
holidays = []
try:
holidays = holidays + calendar.holidays().tolist()
except AttributeError:
pass
holidays = [self._to_dt64(dt, dtype='datetime64[D]') for dt in
holidays]
holidays = tuple(sorted(holidays))
kwargs = {'weekmask': weekmask}
if holidays:
kwargs['holidays'] = holidays
try:
busdaycalendar = np.busdaycalendar(**kwargs)
except:
# Check we have the required numpy version
from distutils.version import LooseVersion
if LooseVersion(np.__version__) < '1.7.0':
raise NotImplementedError("CustomBusinessDay requires numpy >= "
"1.7.0. Current version: " +
np.__version__)
else:
raise
return busdaycalendar, holidays
def __getstate__(self):
"""Return a pickleable state"""
state = self.__dict__.copy()
del state['calendar']
# we don't want to actually pickle the calendar object
# as it's an np.busdaycalendar; we recreate it on deserialization
try:
state['kwds'].pop('calendar')
except:
pass
return state
def __setstate__(self, state):
"""Reconstruct an instance from a pickled state"""
self.__dict__ = state
calendar, holidays = self.get_calendar(weekmask=self.weekmask,
holidays=self.holidays,
calendar=None)
self.kwds['calendar'] = self.calendar = calendar
self.kwds['holidays'] = self.holidays = holidays
self.kwds['weekmask'] = state['weekmask']
@apply_wraps
def apply(self, other):
if self.n <= 0:
roll = 'forward'
else:
roll = 'backward'
if isinstance(other, datetime):
date_in = other
np_dt = np.datetime64(date_in.date())
np_incr_dt = np.busday_offset(np_dt, self.n, roll=roll,
busdaycal=self.calendar)
dt_date = np_incr_dt.astype(datetime)
result = datetime.combine(dt_date, date_in.time())
if self.offset:
result = result + self.offset
return result
elif isinstance(other, (timedelta, Tick)):
return BDay(self.n, offset=self.offset + other,
normalize=self.normalize)
else:
raise ApplyTypeError('Only know how to combine trading day with '
'datetime, datetime64 or timedelta.')
def apply_index(self, i):
raise NotImplementedError
@staticmethod
def _to_dt64(dt, dtype='datetime64'):
# Currently
# > np.datetime64(dt.datetime(2013,5,1),dtype='datetime64[D]')
# numpy.datetime64('2013-05-01T02:00:00.000000+0200')
# Thus astype is needed to cast datetime to datetime64[D]
if getattr(dt, 'tzinfo', None) is not None:
i8 = tslib.pydt_to_i8(dt)
dt = tslib.tz_convert_single(i8, 'UTC', dt.tzinfo)
dt = Timestamp(dt)
dt = np.datetime64(dt)
if dt.dtype.name != dtype:
dt = dt.astype(dtype)
return dt
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
day64 = self._to_dt64(dt,'datetime64[D]')
return np.is_busday(day64, busdaycal=self.calendar)
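# Sketch: CustomBusinessDay with an ad-hoc holiday list (the holiday date below
# is hypothetical and only for illustration).
import pandas as pd
from pandas.tseries.offsets import CDay

_cday = CDay(holidays=['2014-08-04'])        # skip Monday 2014-08-04
pd.Timestamp('2014-08-01') + _cday           # Timestamp('2014-08-05'), Tuesday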
class MonthOffset(SingleConstructorOffset):
_adjust_dst = True
@property
def name(self):
if self.isAnchored():
return self.rule_code
else:
return "%s-%s" % (self.rule_code, _int_to_month[self.n])
class MonthEnd(MonthOffset):
"""DateOffset of one month end"""
@apply_wraps
def apply(self, other):
n = self.n
_, days_in_month = tslib.monthrange(other.year, other.month)
if other.day != days_in_month:
other = other + relativedelta(months=-1, day=31)
if n <= 0:
n = n + 1
other = other + relativedelta(months=n, day=31)
return other
@apply_index_wraps
def apply_index(self, i):
months = self.n - 1 if self.n >= 0 else self.n
shifted = tslib.shift_months(i.asi8, months, 'end')
return i._shallow_copy(shifted)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
days_in_month = tslib.monthrange(dt.year, dt.month)[1]
return dt.day == days_in_month
_prefix = 'M'
class MonthBegin(MonthOffset):
"""DateOffset of one month at beginning"""
@apply_wraps
def apply(self, other):
n = self.n
if other.day > 1 and n <= 0: # then roll forward if n<=0
n += 1
return other + relativedelta(months=n, day=1)
@apply_index_wraps
def apply_index(self, i):
months = self.n + 1 if self.n < 0 else self.n
shifted = tslib.shift_months(i.asi8, months, 'start')
return i._shallow_copy(shifted)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.day == 1
_prefix = 'MS'
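# Sketch of the month anchors implemented above (assumes pandas is importable).
import pandas as pd
from pandas.tseries.offsets import MonthEnd, MonthBegin

_ts = pd.Timestamp('2014-02-10')
_ts + MonthEnd()      # Timestamp('2014-02-28')
_ts + MonthBegin()    # Timestamp('2014-03-01')
_ts - MonthEnd()      # Timestamp('2014-01-31')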
class BusinessMonthEnd(MonthOffset):
"""DateOffset increments between business EOM dates"""
def isAnchored(self):
return (self.n == 1)
@apply_wraps
def apply(self, other):
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, other.month)
lastBDay = days_in_month - max(((wkday + days_in_month - 1)
% 7) - 4, 0)
if n > 0 and not other.day >= lastBDay:
n = n - 1
elif n <= 0 and other.day > lastBDay:
n = n + 1
other = other + relativedelta(months=n, day=31)
if other.weekday() > 4:
other = other - BDay()
return other
_prefix = 'BM'
class BusinessMonthBegin(MonthOffset):
"""DateOffset of one business month at beginning"""
@apply_wraps
def apply(self, other):
n = self.n
wkday, _ = tslib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
if other.day > first and n <= 0:
# as if rolled forward already
n += 1
elif other.day < first and n > 0:
other = other + timedelta(days=first - other.day)
n -= 1
other = other + relativedelta(months=n)
wkday, _ = tslib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
result = datetime(other.year, other.month, first, other.hour, other.minute,
other.second, other.microsecond)
return result
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
first_weekday, _ = tslib.monthrange(dt.year, dt.month)
if first_weekday == 5:
return dt.day == 3
elif first_weekday == 6:
return dt.day == 2
else:
return dt.day == 1
_prefix = 'BMS'
class CustomBusinessMonthEnd(BusinessMixin, MonthOffset):
"""
**EXPERIMENTAL** DateOffset of one custom business month
.. warning:: EXPERIMENTAL
This class is not officially supported and the API is likely to change
in future versions. Use this at your own risk.
Parameters
----------
n : int, default 1
offset : timedelta, default timedelta(0)
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
"""
_cacheable = False
_prefix = 'CBM'
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,
weekmask=weekmask, holidays=holidays,
calendar=calendar, **kwds)
self.m_offset = MonthEnd(n=1, normalize=normalize, **kwds)
self.kwds['calendar'] = self.cbday.calendar # cache numpy calendar
@apply_wraps
def apply(self,other):
n = self.n
# First move to month offset
cur_mend = self.m_offset.rollforward(other)
# Find this custom month offset
cur_cmend = self.cbday.rollback(cur_mend)
# handle zero case. arbitrarily rollforward
if n == 0 and other != cur_cmend:
n += 1
if other < cur_cmend and n >= 1:
n -= 1
elif other > cur_cmend and n <= -1:
n += 1
new = cur_mend + n * self.m_offset
result = self.cbday.rollback(new)
return result
class CustomBusinessMonthBegin(BusinessMixin, MonthOffset):
"""
**EXPERIMENTAL** DateOffset of one custom business month
.. warning:: EXPERIMENTAL
This class is not officially supported and the API is likely to change
in future versions. Use this at your own risk.
Parameters
----------
n : int, default 1
offset : timedelta, default timedelta(0)
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
"""
_cacheable = False
_prefix = 'CBMS'
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,
weekmask=weekmask, holidays=holidays,
calendar=calendar, **kwds)
self.m_offset = MonthBegin(n=1, normalize=normalize, **kwds)
self.kwds['calendar'] = self.cbday.calendar # cache numpy calendar
@apply_wraps
def apply(self,other):
n = self.n
dt_in = other
# First move to month offset
cur_mbegin = self.m_offset.rollback(dt_in)
# Find this custom month offset
cur_cmbegin = self.cbday.rollforward(cur_mbegin)
# handle zero case. arbitrarily rollforward
if n == 0 and dt_in != cur_cmbegin:
n += 1
if dt_in > cur_cmbegin and n <= -1:
n += 1
elif dt_in < cur_cmbegin and n >= 1:
n -= 1
new = cur_mbegin + n * self.m_offset
result = self.cbday.rollforward(new)
return result
class Week(DateOffset):
"""
Weekly offset
Parameters
----------
weekday : int, default None
Always generate specific day of week. 0 for Monday
"""
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.weekday = kwds.get('weekday', None)
if self.weekday is not None:
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got %d' %
self.weekday)
self._inc = timedelta(weeks=1)
self.kwds = kwds
def isAnchored(self):
return (self.n == 1 and self.weekday is not None)
@apply_wraps
def apply(self, other):
base = other
if self.weekday is None:
return other + self.n * self._inc
if self.n > 0:
k = self.n
otherDay = other.weekday()
if otherDay != self.weekday:
other = other + timedelta((self.weekday - otherDay) % 7)
k = k - 1
other = other
for i in range(k):
other = other + self._inc
else:
k = self.n
otherDay = other.weekday()
if otherDay != self.weekday:
other = other + timedelta((self.weekday - otherDay) % 7)
for i in range(-k):
other = other - self._inc
other = datetime(other.year, other.month, other.day,
base.hour, base.minute, base.second, base.microsecond)
return other
@apply_index_wraps
def apply_index(self, i):
if self.weekday is None:
return (i.to_period('W') + self.n).to_timestamp() + i.to_perioddelta('W')
else:
return self._end_apply_index(i, self.freqstr)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.weekday() == self.weekday
_prefix = 'W'
@property
def rule_code(self):
suffix = ''
if self.weekday is not None:
suffix = '-%s' % (_int_to_weekday[self.weekday])
return self._prefix + suffix
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
weekday = None
else:
weekday = _weekday_to_int[suffix]
return cls(weekday=weekday)
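# Sketch: Week anchored on a weekday (0=Monday), per the Week docstring above.
import pandas as pd
from pandas.tseries.offsets import Week

_fri = pd.Timestamp('2014-08-01')    # a Friday
_fri + Week(weekday=0)               # Timestamp('2014-08-04'), next Monday
_fri + Week(2, weekday=4)            # Timestamp('2014-08-15'), two anchored Fridays ahead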
class WeekDay(object):
MON = 0
TUE = 1
WED = 2
THU = 3
FRI = 4
SAT = 5
SUN = 6
_int_to_weekday = {
WeekDay.MON: 'MON',
WeekDay.TUE: 'TUE',
WeekDay.WED: 'WED',
WeekDay.THU: 'THU',
WeekDay.FRI: 'FRI',
WeekDay.SAT: 'SAT',
WeekDay.SUN: 'SUN'
}
_weekday_to_int = dict((v, k) for k, v in _int_to_weekday.items())
class WeekOfMonth(DateOffset):
"""
Describes monthly dates like "the Tuesday of the 2nd week of each month"
Parameters
----------
n : int
week : {0, 1, 2, 3, ...}
0 is 1st week of month, 1 2nd week, etc.
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
"""
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.weekday = kwds['weekday']
self.week = kwds['week']
if self.n == 0:
raise ValueError('N cannot be 0')
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got %d' %
self.weekday)
if self.week < 0 or self.week > 3:
raise ValueError('Week must be 0<=week<=3, got %d' %
self.week)
self.kwds = kwds
@apply_wraps
def apply(self, other):
base = other
offsetOfMonth = self.getOffsetOfMonth(other)
if offsetOfMonth > other:
if self.n > 0:
months = self.n - 1
else:
months = self.n
elif offsetOfMonth == other:
months = self.n
else:
if self.n > 0:
months = self.n
else:
months = self.n + 1
other = self.getOffsetOfMonth(other + relativedelta(months=months, day=1))
other = datetime(other.year, other.month, other.day, base.hour,
base.minute, base.second, base.microsecond)
return other
def getOffsetOfMonth(self, dt):
w = Week(weekday=self.weekday)
d = datetime(dt.year, dt.month, 1, tzinfo=dt.tzinfo)
d = w.rollforward(d)
for i in range(self.week):
d = w.apply(d)
return d
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
d = datetime(dt.year, dt.month, dt.day, tzinfo=dt.tzinfo)
return d == self.getOffsetOfMonth(dt)
@property
def rule_code(self):
return '%s-%d%s' % (self._prefix, self.week + 1,
_int_to_weekday.get(self.weekday, ''))
_prefix = 'WOM'
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
raise ValueError("Prefix %r requires a suffix." % (cls._prefix))
# TODO: handle n here...
# only one digit weeks (1 --> week 0, 2 --> week 1, etc.)
week = int(suffix[0]) - 1
weekday = _weekday_to_int[suffix[1:]]
return cls(week=week, weekday=weekday)
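# Sketch: "the 2nd Tuesday of each month" via WeekOfMonth (week is 0-based).
import pandas as pd
from pandas.tseries.offsets import WeekOfMonth

_wom = WeekOfMonth(week=1, weekday=1)       # 2nd Tuesday
pd.Timestamp('2014-08-01') + _wom           # Timestamp('2014-08-12')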
class LastWeekOfMonth(DateOffset):
"""
Describes monthly dates in last week of month like "the last Tuesday of each month"
Parameters
----------
n : int
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
"""
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.weekday = kwds['weekday']
if self.n == 0:
raise ValueError('N cannot be 0')
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got %d' %
self.weekday)
self.kwds = kwds
@apply_wraps
def apply(self, other):
offsetOfMonth = self.getOffsetOfMonth(other)
if offsetOfMonth > other:
if self.n > 0:
months = self.n - 1
else:
months = self.n
elif offsetOfMonth == other:
months = self.n
else:
if self.n > 0:
months = self.n
else:
months = self.n + 1
return self.getOffsetOfMonth(other + relativedelta(months=months, day=1))
def getOffsetOfMonth(self, dt):
m = MonthEnd()
d = datetime(dt.year, dt.month, 1, dt.hour, dt.minute,
dt.second, dt.microsecond, tzinfo=dt.tzinfo)
eom = m.rollforward(d)
w = Week(weekday=self.weekday)
return w.rollback(eom)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt == self.getOffsetOfMonth(dt)
@property
def rule_code(self):
return '%s-%s' % (self._prefix, _int_to_weekday.get(self.weekday, ''))
_prefix = 'LWOM'
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
raise ValueError("Prefix %r requires a suffix." % (cls._prefix))
# TODO: handle n here...
weekday = _weekday_to_int[suffix]
return cls(weekday=weekday)
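# Sketch: "the last Friday of each month" via LastWeekOfMonth.
import pandas as pd
from pandas.tseries.offsets import LastWeekOfMonth

_lwom = LastWeekOfMonth(weekday=4)          # last Friday
pd.Timestamp('2014-08-01') + _lwom          # Timestamp('2014-08-29')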
class QuarterOffset(DateOffset):
"""Quarter representation - doesn't call super"""
#: default month for __init__
_default_startingMonth = None
#: default month in _from_name
_from_name_startingMonth = None
_adjust_dst = True
# TODO: Consider combining QuarterOffset and YearOffset __init__ at some
# point
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.startingMonth = kwds.get('startingMonth',
self._default_startingMonth)
self.kwds = kwds
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@classmethod
def _from_name(cls, suffix=None):
kwargs = {}
if suffix:
kwargs['startingMonth'] = _month_to_int[suffix]
else:
if cls._from_name_startingMonth is not None:
kwargs['startingMonth'] = cls._from_name_startingMonth
return cls(**kwargs)
@property
def rule_code(self):
return '%s-%s' % (self._prefix, _int_to_month[self.startingMonth])
class BQuarterEnd(QuarterOffset):
"""DateOffset increments between business Quarter dates
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
"""
_outputName = 'BusinessQuarterEnd'
_default_startingMonth = 3
# 'BQ'
_from_name_startingMonth = 12
_prefix = 'BQ'
@apply_wraps
def apply(self, other):
n = self.n
base = other
other = datetime(other.year, other.month, other.day,
other.hour, other.minute, other.second,
other.microsecond)
wkday, days_in_month = tslib.monthrange(other.year, other.month)
lastBDay = days_in_month - max(((wkday + days_in_month - 1)
% 7) - 4, 0)
monthsToGo = 3 - ((other.month - self.startingMonth) % 3)
if monthsToGo == 3:
monthsToGo = 0
if n > 0 and not (other.day >= lastBDay and monthsToGo == 0):
n = n - 1
elif n <= 0 and other.day > lastBDay and monthsToGo == 0:
n = n + 1
other = other + relativedelta(months=monthsToGo + 3 * n, day=31)
other = tslib._localize_pydatetime(other, base.tzinfo)
if other.weekday() > 4:
other = other - BDay()
return other
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
modMonth = (dt.month - self.startingMonth) % 3
return BMonthEnd().onOffset(dt) and modMonth == 0
_int_to_month = tslib._MONTH_ALIASES
_month_to_int = dict((v, k) for k, v in _int_to_month.items())
# TODO: This is basically the same as BQuarterEnd
class BQuarterBegin(QuarterOffset):
_outputName = "BusinessQuarterBegin"
# I suspect this is wrong for *all* of them.
_default_startingMonth = 3
_from_name_startingMonth = 1
_prefix = 'BQS'
@apply_wraps
def apply(self, other):
n = self.n
wkday, _ = tslib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
monthsSince = (other.month - self.startingMonth) % 3
if n <= 0 and monthsSince != 0: # make sure to roll forward so negate
monthsSince = monthsSince - 3
# roll forward if on same month later than first bday
if n <= 0 and (monthsSince == 0 and other.day > first):
n = n + 1
# pretend to roll back if on same month but before firstbday
elif n > 0 and (monthsSince == 0 and other.day < first):
n = n - 1
# get the first bday for result
other = other + relativedelta(months=3 * n - monthsSince)
wkday, _ = tslib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
result = datetime(other.year, other.month, first,
other.hour, other.minute, other.second,
other.microsecond)
return result
class QuarterEnd(QuarterOffset):
"""DateOffset increments between business Quarter dates
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/31/2007, 6/30/2007, ...
"""
_outputName = 'QuarterEnd'
_default_startingMonth = 3
_prefix = 'Q'
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.startingMonth = kwds.get('startingMonth', 3)
self.kwds = kwds
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@apply_wraps
def apply(self, other):
n = self.n
other = datetime(other.year, other.month, other.day,
other.hour, other.minute, other.second,
other.microsecond)
wkday, days_in_month = tslib.monthrange(other.year, other.month)
monthsToGo = 3 - ((other.month - self.startingMonth) % 3)
if monthsToGo == 3:
monthsToGo = 0
if n > 0 and not (other.day >= days_in_month and monthsToGo == 0):
n = n - 1
other = other + relativedelta(months=monthsToGo + 3 * n, day=31)
return other
@apply_index_wraps
def apply_index(self, i):
return self._end_apply_index(i, self.freqstr)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
modMonth = (dt.month - self.startingMonth) % 3
return MonthEnd().onOffset(dt) and modMonth == 0
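# Sketch: QuarterEnd anchored on different startingMonth values (see docstring above).
import pandas as pd
from pandas.tseries.offsets import QuarterEnd

_ts = pd.Timestamp('2014-02-10')
_ts + QuarterEnd(startingMonth=3)    # Timestamp('2014-03-31')
_ts + QuarterEnd(startingMonth=1)    # Timestamp('2014-04-30')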
class QuarterBegin(QuarterOffset):
_outputName = 'QuarterBegin'
_default_startingMonth = 3
_from_name_startingMonth = 1
_prefix = 'QS'
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@apply_wraps
def apply(self, other):
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, other.month)
monthsSince = (other.month - self.startingMonth) % 3
if n <= 0 and monthsSince != 0:
# make sure you roll forward, so negate
monthsSince = monthsSince - 3
if n < 0 and (monthsSince == 0 and other.day > 1):
# after start, so come back an extra period as if rolled forward
n = n + 1
other = other + relativedelta(months=3 * n - monthsSince, day=1)
return other
@apply_index_wraps
def apply_index(self, i):
freq_month = 12 if self.startingMonth == 1 else self.startingMonth - 1
freqstr = 'Q-%s' % (_int_to_month[freq_month],)
return self._beg_apply_index(i, freqstr)
class YearOffset(DateOffset):
"""DateOffset that just needs a month"""
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.month = kwds.get('month', self._default_month)
if self.month < 1 or self.month > 12:
raise ValueError('Month must go from 1 to 12')
DateOffset.__init__(self, n=n, normalize=normalize, **kwds)
@classmethod
def _from_name(cls, suffix=None):
kwargs = {}
if suffix:
kwargs['month'] = _month_to_int[suffix]
return cls(**kwargs)
@property
def rule_code(self):
return '%s-%s' % (self._prefix, _int_to_month[self.month])
class BYearEnd(YearOffset):
"""DateOffset increments between business EOM dates"""
_outputName = 'BusinessYearEnd'
_default_month = 12
_prefix = 'BA'
@apply_wraps
def apply(self, other):
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, self.month)
lastBDay = (days_in_month -
max(((wkday + days_in_month - 1) % 7) - 4, 0))
years = n
if n > 0:
if (other.month < self.month or
(other.month == self.month and other.day < lastBDay)):
years -= 1
elif n <= 0:
if (other.month > self.month or
(other.month == self.month and other.day > lastBDay)):
years += 1
other = other + relativedelta(years=years)
_, days_in_month = tslib.monthrange(other.year, self.month)
result = datetime(other.year, self.month, days_in_month,
other.hour, other.minute, other.second,
other.microsecond)
if result.weekday() > 4:
result = result - BDay()
return result
class BYearBegin(YearOffset):
"""DateOffset increments between business year begin dates"""
_outputName = 'BusinessYearBegin'
_default_month = 1
_prefix = 'BAS'
@apply_wraps
def apply(self, other):
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, self.month)
first = _get_firstbday(wkday)
years = n
if n > 0: # roll back first for positive n
if (other.month < self.month or
(other.month == self.month and other.day < first)):
years -= 1
elif n <= 0: # roll forward
if (other.month > self.month or
(other.month == self.month and other.day > first)):
years += 1
# set first bday for result
other = other + relativedelta(years=years)
wkday, days_in_month = tslib.monthrange(other.year, self.month)
first = _get_firstbday(wkday)
return datetime(other.year, self.month, first, other.hour,
other.minute, other.second, other.microsecond)
class YearEnd(YearOffset):
"""DateOffset increments between calendar year ends"""
_default_month = 12
_prefix = 'A'
@apply_wraps
def apply(self, other):
def _increment(date):
if date.month == self.month:
_, days_in_month = tslib.monthrange(date.year, self.month)
if date.day != days_in_month:
year = date.year
else:
year = date.year + 1
elif date.month < self.month:
year = date.year
else:
year = date.year + 1
_, days_in_month = tslib.monthrange(year, self.month)
return datetime(year, self.month, days_in_month,
date.hour, date.minute, date.second,
date.microsecond)
def _decrement(date):
year = date.year if date.month > self.month else date.year - 1
_, days_in_month = tslib.monthrange(year, self.month)
return datetime(year, self.month, days_in_month,
date.hour, date.minute, date.second,
date.microsecond)
def _rollf(date):
if date.month != self.month or\
date.day < tslib.monthrange(date.year, date.month)[1]:
date = _increment(date)
return date
n = self.n
result = other
if n > 0:
while n > 0:
result = _increment(result)
n -= 1
elif n < 0:
while n < 0:
result = _decrement(result)
n += 1
else:
# n == 0, roll forward
result = _rollf(result)
return result
@apply_index_wraps
def apply_index(self, i):
# convert month anchor to annual period tuple
return self._end_apply_index(i, self.freqstr)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
wkday, days_in_month = tslib.monthrange(dt.year, self.month)
return self.month == dt.month and dt.day == days_in_month
class YearBegin(YearOffset):
"""DateOffset increments between calendar year begin dates"""
_default_month = 1
_prefix = 'AS'
@apply_wraps
def apply(self, other):
def _increment(date, n):
year = date.year + n - 1
if date.month >= self.month:
year += 1
return datetime(year, self.month, 1, date.hour, date.minute,
date.second, date.microsecond)
def _decrement(date, n):
year = date.year + n + 1
if date.month < self.month or (date.month == self.month and
date.day == 1):
year -= 1
return datetime(year, self.month, 1, date.hour, date.minute,
date.second, date.microsecond)
def _rollf(date):
if (date.month != self.month) or date.day > 1:
date = _increment(date, 1)
return date
n = self.n
result = other
if n > 0:
result = _increment(result, n)
elif n < 0:
result = _decrement(result, n)
else:
# n == 0, roll forward
result = _rollf(result)
return result
@apply_index_wraps
def apply_index(self, i):
freq_month = 12 if self.month == 1 else self.month - 1
freqstr = 'A-%s' % (_int_to_month[freq_month],)
return self._beg_apply_index(i, freqstr)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.month == self.month and dt.day == 1
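# Sketch of the calendar-year anchors above, including a non-default month.
import pandas as pd
from pandas.tseries.offsets import YearEnd, YearBegin

_ts = pd.Timestamp('2014-05-01')
_ts + YearEnd()             # Timestamp('2014-12-31')
_ts + YearEnd(month=6)      # Timestamp('2014-06-30')
_ts + YearBegin()           # Timestamp('2015-01-01')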
class FY5253(DateOffset):
"""
Describes 52-53 week fiscal year. This is also known as a 4-4-5 calendar.
It is used by companies that desire that their
fiscal year always end on the same day of the week.
It is a method of managing accounting periods.
It is a common calendar structure for some industries,
such as retail, manufacturing and the parking industry.
For more information see:
http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
The year may either:
- end on the last X day of the Y month.
- end on the last X day closest to the last day of the Y month.
X is a specific day of the week.
Y is a certain month of the year
Parameters
----------
n : int
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
startingMonth : The month in which fiscal years end. {1, 2, ... 12}
variation : str
{"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
"""
_prefix = 'RE'
_suffix_prefix_last = 'L'
_suffix_prefix_nearest = 'N'
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.startingMonth = kwds['startingMonth']
self.weekday = kwds["weekday"]
self.variation = kwds["variation"]
self.kwds = kwds
if self.n == 0:
raise ValueError('N cannot be 0')
if self.variation not in ["nearest", "last"]:
raise ValueError('%s is not a valid variation' % self.variation)
if self.variation == "nearest":
weekday_offset = weekday(self.weekday)
self._rd_forward = relativedelta(weekday=weekday_offset)
self._rd_backward = relativedelta(weekday=weekday_offset(-1))
else:
self._offset_lwom = LastWeekOfMonth(n=1, weekday=self.weekday)
def isAnchored(self):
return self.n == 1 \
and self.startingMonth is not None \
and self.weekday is not None
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
dt = datetime(dt.year, dt.month, dt.day)
year_end = self.get_year_end(dt)
if self.variation == "nearest":
# We have to check the year end of "this" cal year AND the previous
return year_end == dt or \
self.get_year_end(dt - relativedelta(months=1)) == dt
else:
return year_end == dt
@apply_wraps
def apply(self, other):
n = self.n
prev_year = self.get_year_end(
datetime(other.year - 1, self.startingMonth, 1))
cur_year = self.get_year_end(
datetime(other.year, self.startingMonth, 1))
next_year = self.get_year_end(
datetime(other.year + 1, self.startingMonth, 1))
prev_year = tslib._localize_pydatetime(prev_year, other.tzinfo)
cur_year = tslib._localize_pydatetime(cur_year, other.tzinfo)
next_year = tslib._localize_pydatetime(next_year, other.tzinfo)
if n > 0:
if other == prev_year:
year = other.year - 1
elif other == cur_year:
year = other.year
elif other == next_year:
year = other.year + 1
elif other < prev_year:
year = other.year - 1
n -= 1
elif other < cur_year:
year = other.year
n -= 1
elif other < next_year:
year = other.year + 1
n -= 1
else:
assert False
result = self.get_year_end(datetime(year + n, self.startingMonth, 1))
result = datetime(result.year, result.month, result.day,
other.hour, other.minute, other.second, other.microsecond)
return result
else:
n = -n
if other == prev_year:
year = other.year - 1
elif other == cur_year:
year = other.year
elif other == next_year:
year = other.year + 1
elif other > next_year:
year = other.year + 1
n -= 1
elif other > cur_year:
year = other.year
n -= 1
elif other > prev_year:
year = other.year - 1
n -= 1
else:
assert False
result = self.get_year_end(datetime(year - n, self.startingMonth, 1))
result = datetime(result.year, result.month, result.day,
other.hour, other.minute, other.second, other.microsecond)
return result
def get_year_end(self, dt):
if self.variation == "nearest":
return self._get_year_end_nearest(dt)
else:
return self._get_year_end_last(dt)
def get_target_month_end(self, dt):
target_month = datetime(dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo)
next_month_first_of = target_month + relativedelta(months=+1)
return next_month_first_of + relativedelta(days=-1)
def _get_year_end_nearest(self, dt):
target_date = self.get_target_month_end(dt)
if target_date.weekday() == self.weekday:
return target_date
else:
forward = target_date + self._rd_forward
backward = target_date + self._rd_backward
if forward - target_date < target_date - backward:
return forward
else:
return backward
def _get_year_end_last(self, dt):
current_year = datetime(dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo)
return current_year + self._offset_lwom
@property
def rule_code(self):
suffix = self.get_rule_code_suffix()
return "%s-%s" % (self._get_prefix(), suffix)
def _get_prefix(self):
return self._prefix
def _get_suffix_prefix(self):
if self.variation == "nearest":
return self._suffix_prefix_nearest
else:
return self._suffix_prefix_last
def get_rule_code_suffix(self):
return '%s-%s-%s' % (self._get_suffix_prefix(), \
_int_to_month[self.startingMonth], \
_int_to_weekday[self.weekday])
@classmethod
def _parse_suffix(cls, varion_code, startingMonth_code, weekday_code):
if varion_code == "N":
variation = "nearest"
elif varion_code == "L":
variation = "last"
else:
raise ValueError(
"Unable to parse varion_code: %s" % (varion_code,))
startingMonth = _month_to_int[startingMonth_code]
weekday = _weekday_to_int[weekday_code]
return {
"weekday": weekday,
"startingMonth": startingMonth,
"variation": variation,
}
@classmethod
def _from_name(cls, *args):
return cls(**cls._parse_suffix(*args))
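# Sketch: a 52-53 week fiscal year ending on the last Saturday of December
# (variation="last"), matching the parameters documented above.
import pandas as pd
from pandas.tseries.offsets import FY5253

_fy = FY5253(startingMonth=12, weekday=5, variation='last')
pd.Timestamp('2014-01-01') + _fy     # Timestamp('2014-12-27'), last Saturday of Dec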
class FY5253Quarter(DateOffset):
"""
DateOffset increments between business quarter dates
for 52-53 week fiscal year (also known as a 4-4-5 calendar).
It is used by companies that desire that their
fiscal year always end on the same day of the week.
It is a method of managing accounting periods.
It is a common calendar structure for some industries,
such as retail, manufacturing and the parking industry.
For more information see:
http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
The year may either:
- end on the last X day of the Y month.
- end on the last X day closest to the last day of the Y month.
X is a specific day of the week.
Y is a certain month of the year
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
Parameters
----------
n : int
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
startingMonth : The month in which fiscal years end. {1, 2, ... 12}
qtr_with_extra_week : The quarter number that gets the extra
(14th) week when needed. {1,2,3,4}
variation : str
{"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
"""
_prefix = 'REQ'
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.qtr_with_extra_week = kwds["qtr_with_extra_week"]
self.kwds = kwds
if self.n == 0:
raise ValueError('N cannot be 0')
self._offset = FY5253( \
startingMonth=kwds['startingMonth'], \
weekday=kwds["weekday"],
variation=kwds["variation"])
def isAnchored(self):
return self.n == 1 and self._offset.isAnchored()
@apply_wraps
def apply(self, other):
base = other
n = self.n
if n > 0:
while n > 0:
if not self._offset.onOffset(other):
qtr_lens = self.get_weeks(other)
start = other - self._offset
else:
start = other
qtr_lens = self.get_weeks(other + self._offset)
for weeks in qtr_lens:
start += relativedelta(weeks=weeks)
if start > other:
other = start
n -= 1
break
else:
n = -n
while n > 0:
if not self._offset.onOffset(other):
qtr_lens = self.get_weeks(other)
end = other + self._offset
else:
end = other
qtr_lens = self.get_weeks(other)
for weeks in reversed(qtr_lens):
end -= relativedelta(weeks=weeks)
if end < other:
other = end
n -= 1
break
other = datetime(other.year, other.month, other.day,
base.hour, base.minute, base.second, base.microsecond)
return other
def get_weeks(self, dt):
ret = [13] * 4
year_has_extra_week = self.year_has_extra_week(dt)
if year_has_extra_week:
ret[self.qtr_with_extra_week - 1] = 14
return ret
def year_has_extra_week(self, dt):
if self._offset.onOffset(dt):
prev_year_end = dt - self._offset
next_year_end = dt
else:
next_year_end = dt + self._offset
prev_year_end = dt - self._offset
week_in_year = (next_year_end - prev_year_end).days / 7
return week_in_year == 53
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
if self._offset.onOffset(dt):
return True
next_year_end = dt - self._offset
qtr_lens = self.get_weeks(dt)
current = next_year_end
for qtr_len in qtr_lens[0:4]:
current += relativedelta(weeks=qtr_len)
if dt == current:
return True
return False
@property
def rule_code(self):
suffix = self._offset.get_rule_code_suffix()
return "%s-%s" % (self._prefix,
"%s-%d" % (suffix, self.qtr_with_extra_week))
@classmethod
def _from_name(cls, *args):
return cls(**dict(FY5253._parse_suffix(*args[:-1]),
qtr_with_extra_week=int(args[-1])))
class Easter(DateOffset):
'''
DateOffset for the Easter holiday using
logic defined in dateutil. Right now uses
the revised method which is valid in years
1583-4099.
'''
_adjust_dst = True
def __init__(self, n=1, **kwds):
super(Easter, self).__init__(n, **kwds)
@apply_wraps
def apply(self, other):
currentEaster = easter(other.year)
currentEaster = datetime(currentEaster.year, currentEaster.month, currentEaster.day)
currentEaster = tslib._localize_pydatetime(currentEaster, other.tzinfo)
# NOTE: easter returns a datetime.date so we have to convert to type of other
if self.n >= 0:
if other >= currentEaster:
new = easter(other.year + self.n)
else:
new = easter(other.year + self.n - 1)
else:
if other > currentEaster:
new = easter(other.year + self.n + 1)
else:
new = easter(other.year + self.n)
new = datetime(new.year, new.month, new.day, other.hour,
other.minute, other.second, other.microsecond)
return new
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return date(dt.year, dt.month, dt.day) == easter(dt.year)
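# Sketch: the Easter offset jumps to the next Easter Sunday via dateutil's easter().
import pandas as pd
from pandas.tseries.offsets import Easter

pd.Timestamp('2014-01-01') + Easter()    # Timestamp('2014-04-20')
pd.Timestamp('2014-05-01') + Easter()    # Timestamp('2015-04-05')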
#----------------------------------------------------------------------
# Ticks
import operator
def _tick_comp(op):
def f(self, other):
return op(self.delta, other.delta)
return f
class Tick(SingleConstructorOffset):
_inc = Timedelta(microseconds=1000)
__gt__ = _tick_comp(operator.gt)
__ge__ = _tick_comp(operator.ge)
__lt__ = _tick_comp(operator.lt)
__le__ = _tick_comp(operator.le)
__eq__ = _tick_comp(operator.eq)
__ne__ = _tick_comp(operator.ne)
def __add__(self, other):
if isinstance(other, Tick):
if type(self) == type(other):
return type(self)(self.n + other.n)
else:
return _delta_to_tick(self.delta + other.delta)
try:
return self.apply(other)
except ApplyTypeError:
return NotImplemented
def __eq__(self, other):
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
other = to_offset(other)
if isinstance(other, Tick):
return self.delta == other.delta
else:
return DateOffset.__eq__(self, other)
# This is identical to DateOffset.__hash__, but has to be redefined here
# for Python 3, because we've redefined __eq__.
def __hash__(self):
return hash(self._params())
def __ne__(self, other):
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
other = to_offset(other)
if isinstance(other, Tick):
return self.delta != other.delta
else:
return DateOffset.__ne__(self, other)
@property
def delta(self):
return self.n * self._inc
@property
def nanos(self):
return _delta_to_nanoseconds(self.delta)
def apply(self, other):
# Timestamp can handle tz and nano sec, thus no need to use apply_wraps
if isinstance(other, (datetime, np.datetime64, date)):
return as_timestamp(other) + self
if isinstance(other, timedelta):
return other + self.delta
elif isinstance(other, type(self)):
return type(self)(self.n + other.n)
else:
raise ApplyTypeError('Unhandled type: %s' % type(other).__name__)
_prefix = 'undefined'
def isAnchored(self):
return False
def _delta_to_tick(delta):
if delta.microseconds == 0:
if delta.seconds == 0:
return Day(delta.days)
else:
seconds = delta.days * 86400 + delta.seconds
if seconds % 3600 == 0:
return Hour(seconds / 3600)
elif seconds % 60 == 0:
return Minute(seconds / 60)
else:
return Second(seconds)
else:
nanos = _delta_to_nanoseconds(delta)
if nanos % 1000000 == 0:
return Milli(nanos // 1000000)
elif nanos % 1000 == 0:
return Micro(nanos // 1000)
else: # pragma: no cover
return Nano(nanos)
_delta_to_nanoseconds = tslib._delta_to_nanoseconds
class Day(Tick):
_inc = Timedelta(days=1)
_prefix = 'D'
class Hour(Tick):
_inc = Timedelta(hours=1)
_prefix = 'H'
class Minute(Tick):
_inc = Timedelta(minutes=1)
_prefix = 'T'
class Second(Tick):
_inc = Timedelta(seconds=1)
_prefix = 'S'
class Milli(Tick):
_inc = Timedelta(milliseconds=1)
_prefix = 'L'
class Micro(Tick):
_inc = Timedelta(microseconds=1)
_prefix = 'U'
class Nano(Tick):
_inc = Timedelta(nanoseconds=1)
_prefix = 'N'
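# Sketch: Tick subclasses are fixed-frequency offsets backed by Timedelta, so
# they add and compare through their .delta (see _tick_comp and __add__ above).
import pandas as pd
from pandas.tseries.offsets import Hour, Minute

Hour(2) + Minute(30)                     # <150 * Minutes>, via _delta_to_tick
Hour(1) > Minute(59)                     # True, compares the underlying deltas
pd.Timestamp('2014-01-01') + Hour(36)    # Timestamp('2014-01-02 12:00:00')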
BDay = BusinessDay
BMonthEnd = BusinessMonthEnd
BMonthBegin = BusinessMonthBegin
CBMonthEnd = CustomBusinessMonthEnd
CBMonthBegin = CustomBusinessMonthBegin
CDay = CustomBusinessDay
def _get_firstbday(wkday):
"""
wkday is the result of monthrange(year, month)
If it's a saturday or sunday, increment first business day to reflect this
"""
first = 1
if wkday == 5: # on Saturday
first = 3
elif wkday == 6: # on Sunday
first = 2
return first
def generate_range(start=None, end=None, periods=None,
offset=BDay(), time_rule=None):
"""
Generates a sequence of dates corresponding to the specified time
offset. Similar to dateutil.rrule except uses pandas DateOffset
objects to represent time increments
Parameters
----------
start : datetime (default None)
end : datetime (default None)
periods : int, optional
time_rule : (legacy) name of DateOffset object to be used, optional
Corresponds with names expected by tseries.frequencies.get_offset
Notes
-----
* This method is faster for generating weekdays than dateutil.rrule
* At least two of (start, end, periods) must be specified.
* If both start and end are specified, the returned dates will
satisfy start <= date <= end.
* If both time_rule and offset are specified, time_rule supersedes offset.
Returns
-------
dates : generator object
"""
if time_rule is not None:
from pandas.tseries.frequencies import get_offset
offset = get_offset(time_rule)
start = to_datetime(start)
end = to_datetime(end)
if start and not offset.onOffset(start):
start = offset.rollforward(start)
elif end and not offset.onOffset(end):
end = offset.rollback(end)
if periods is None and end < start:
end = None
periods = 0
if end is None:
end = start + (periods - 1) * offset
if start is None:
start = end - (periods - 1) * offset
cur = start
if offset.n >= 0:
while cur <= end:
yield cur
# faster than cur + offset
next_date = offset.apply(cur)
if next_date <= cur:
raise ValueError('Offset %s did not increment date' % offset)
cur = next_date
else:
while cur >= end:
yield cur
# faster than cur + offset
next_date = offset.apply(cur)
if next_date >= cur:
raise ValueError('Offset %s did not decrement date' % offset)
cur = next_date
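# Sketch calling generate_range as defined above (it is in scope in this module;
# user code usually reaches the same behaviour through pd.date_range instead).
from datetime import datetime as _dt

_dates = list(generate_range(start=_dt(2014, 8, 1), periods=3, offset=BDay()))
# -> [2014-08-01, 2014-08-04, 2014-08-05] as Timestamps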
prefix_mapping = dict((offset._prefix, offset) for offset in [
YearBegin, # 'AS'
YearEnd, # 'A'
BYearBegin, # 'BAS'
BYearEnd, # 'BA'
BusinessDay, # 'B'
BusinessMonthBegin, # 'BMS'
BusinessMonthEnd, # 'BM'
BQuarterEnd, # 'BQ'
BQuarterBegin, # 'BQS'
BusinessHour, # 'BH'
CustomBusinessDay, # 'C'
CustomBusinessMonthEnd, # 'CBM'
CustomBusinessMonthBegin, # 'CBMS'
MonthEnd, # 'M'
MonthBegin, # 'MS'
Week, # 'W'
Second, # 'S'
Minute, # 'T'
Micro, # 'U'
QuarterEnd, # 'Q'
QuarterBegin, # 'QS'
Milli, # 'L'
Hour, # 'H'
Day, # 'D'
WeekOfMonth, # 'WOM'
FY5253,
FY5253Quarter,
])
prefix_mapping['N'] = Nano
def _make_offset(key):
"""Gets offset based on key. KeyError if prefix is bad, ValueError if
suffix is bad. All handled by `get_offset` in tseries/frequencies. Not
public."""
if key is None:
return None
split = key.split('-')
klass = prefix_mapping[split[0]]
# handles case where there's no suffix (and will TypeError if too many '-')
obj = klass._from_name(*split[1:])
obj._named = key
return obj
|
mit
|
macks22/scikit-learn
|
sklearn/calibration.py
|
137
|
18876
|
"""Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import inspect
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .cross_validation import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
for prediction. In case that cv="prefit" is passed to __init__,
it is assumed that base_estimator has been
fitted already and all data is used for calibration. Note that
data for fitting the classifier and for calibrating it must be disjoint.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach. It is not advised to use isotonic calibration
with too few calibration samples (<<1000) since it tends to overfit.
Use sigmoid (Platt's) calibration in this case.
cv : integer or cross-validation generator or "prefit", optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
The list of calibrated classifiers, one for each cross-validation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
X, y = indexable(X, y)
lb = LabelBinarizer().fit(y)
self.classes_ = lb.classes_
# Check that each cross-validation fold can have at least one
# example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, X, y, classifier=True)
arg_names = inspect.getargspec(base_estimator.fit)[0]
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in arg_names):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
base_estimator_sample_weight = sample_weight
for train, test in cv:
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
# Compute the arithmetic mean of the predictions of the calibrated
        # classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
        non-parametric approach based on isotonic regression.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid'):
self.base_estimator = base_estimator
self.method = method
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = np.arange(df.shape[1])
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
lb = LabelBinarizer()
Y = lb.fit_transform(y)
self.classes_ = lb.classes_
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
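# Descriptive note (added comment): _CalibratedClassifier fits one calibrator
# per column of the label-binarized targets (one-vs-rest), applies each
# calibrator to the corresponding column of the decision values in
# predict_proba, and then renormalizes the rows of the probability matrix.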
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
F = df # F follows Platt's notations
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
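# Descriptive note (added comment): the pair (a, b) returned above defines the
# sigmoid mapping used by _SigmoidCalibration.predict below,
#     p(f) = 1. / (1. + exp(a * f + b)),
# i.e. raw decision values f are squashed into calibrated probabilities, with
# the slope a and intercept b obtained by minimizing the objective with BFGS.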
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
T_ : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
        Whether y_prob needs to be normalized into the interval [0, 1], i.e.
        it is not a proper probability. If True, the smallest value in y_prob
        is mapped
onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
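# Illustrative usage sketch (added, not part of the original module): it shows
# how CalibratedClassifierCV and calibration_curve defined above fit together.
# The dataset, base estimator and parameter values are arbitrary choices for
# the example; the __main__ guard keeps it from running on import.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.svm import LinearSVC

    X_demo, y_demo = make_classification(n_samples=500, random_state=0)
    clf = CalibratedClassifierCV(LinearSVC(random_state=0),
                                 method='sigmoid', cv=3)
    clf.fit(X_demo, y_demo)
    prob_pos = clf.predict_proba(X_demo)[:, 1]
    # Fraction of positives vs. mean predicted probability in each bin.
    prob_true, prob_pred = calibration_curve(y_demo, prob_pos, n_bins=5)
    print(prob_true)
    print(prob_pred)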
|
bsd-3-clause
|
zaxtax/scikit-learn
|
sklearn/metrics/tests/test_common.py
|
31
|
41654
|
from __future__ import division, print_function
from functools import partial
from itertools import product
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
# TODO Curve are currently not covered by invariance test
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import roc_curve
from sklearn.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" argument, which
#   returns the mean of the metrics when True and their sum when False.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of data structures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layout.
#
REGRESSION_METRICS = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"explained_variance_score": explained_variance_score,
"r2_score": partial(r2_score, multioutput='variance_weighted'),
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
"confusion_matrix": confusion_matrix,
"hamming_loss": hamming_loss,
"jaccard_similarity_score": jaccard_similarity_score,
"unnormalized_jaccard_similarity_score":
partial(jaccard_similarity_score, normalize=False),
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
# These are needed to test averaging
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
"cohen_kappa_score": cohen_kappa_score,
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"label_ranking_loss": label_ranking_loss,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"brier_score_loss": brier_score_loss,
"roc_auc_score": roc_auc_score,
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"macro_roc_auc": partial(roc_auc_score, average="macro"),
"average_precision_score": average_precision_score,
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"macro_average_precision_score":
partial(average_precision_score, average="macro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
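# For example (illustrative only), the dictionaries above allow a metric to be
# looked up by name and called uniformly:
#
#     metric = ALL_METRICS["accuracy_score"]
#     metric([0, 1, 1, 0], [0, 1, 0, 0])   # -> 0.75
#
# which is what the generic invariance tests below rely on.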
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Those metrics don't support binary inputs
METRIC_UNDEFINED_BINARY = [
"samples_f0.5_score",
"samples_f1_score",
"samples_f2_score",
"samples_precision_score",
"samples_recall_score",
"coverage_error",
"roc_auc_score",
"micro_roc_auc",
"weighted_roc_auc",
"macro_roc_auc",
"samples_roc_auc",
"average_precision_score",
"weighted_average_precision_score",
"micro_average_precision_score",
"macro_average_precision_score",
"samples_average_precision_score",
"label_ranking_loss",
"label_ranking_average_precision_score",
]
# Those metrics don't support multiclass inputs
METRIC_UNDEFINED_MULTICLASS = [
"brier_score_loss",
"matthews_corrcoef_score",
]
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_BINARY_MULTICLASS = set(METRIC_UNDEFINED_BINARY).union(
set(METRIC_UNDEFINED_MULTICLASS))
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = [
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score"
]
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = [
"roc_auc_score", "average_precision_score",
]
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = [
"roc_curve",
"brier_score_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
# pos_label support deprecated; to be removed in 0.18:
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
]
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that have a labels argument as well as a
# decision function argument, e.g. hinge_loss
METRICS_WITH_LABELS = [
"confusion_matrix",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"cohen_kappa_score",
]
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = [
"accuracy_score",
"jaccard_similarity_score",
"zero_one_loss",
]
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = [
"log_loss",
"unnormalized_log_loss",
"roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
"micro_roc_auc", "macro_roc_auc",
"average_precision_score", "weighted_average_precision_score",
"samples_average_precision_score", "micro_average_precision_score",
"macro_average_precision_score",
"coverage_error", "label_ranking_loss",
]
# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
]
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = [
"mean_absolute_error", "mean_squared_error", "r2_score",
"explained_variance_score"
]
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"f1_score", "weighted_f1_score", "micro_f1_score", "macro_f1_score",
"matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
"median_absolute_error",
"cohen_kappa_score",
]
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = [
"explained_variance_score",
"r2_score",
"confusion_matrix",
"precision_score", "recall_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f2_score", "weighted_precision_score",
"weighted_recall_score",
"micro_f0.5_score", "micro_f2_score", "micro_precision_score",
"micro_recall_score",
"macro_f0.5_score", "macro_f2_score", "macro_precision_score",
"macro_recall_score", "log_loss", "hinge_loss"
]
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = [
"cohen_kappa_score",
"confusion_matrix", # Left this one here because the tests in this file do
# not work for confusion_matrix, as its output is a
# matrix instead of a number. Testing of
# confusion_matrix with sample_weight is in
# test_classification.py
"median_absolute_error",
]
@ignore_warnings
def test_symmetry():
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
# We shouldn't forget any metrics
assert_equal(set(SYMMETRIC_METRICS).union(
NOT_SYMMETRIC_METRICS, THRESHOLDED_METRICS,
METRIC_UNDEFINED_BINARY_MULTICLASS), set(ALL_METRICS))
assert_equal(
set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)),
set([]))
# Symmetric metric
for name in SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_pred, y_true),
err_msg="%s is not symmetric" % name)
# Not symmetric metrics
for name in NOT_SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)),
msg="%s seems to be symmetric" % name)
@ignore_warnings
def test_sample_order_invariance():
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
y_pred,
y_score,
random_state=0)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_format_invariance_with_1d_vectors():
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_equal(y1_1d.ndim, 1)
assert_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
measure = metric(y1, y2)
assert_almost_equal(metric(y1_list, y2_list), measure,
err_msg="%s is not representation invariant "
"with list" % name)
assert_almost_equal(metric(y1_1d, y2_1d), measure,
err_msg="%s is not representation invariant "
"with np-array-1d" % name)
assert_almost_equal(metric(y1_column, y2_column), measure,
err_msg="%s is not representation invariant "
"with np-array-column" % name)
# Mix format support
assert_almost_equal(metric(y1_1d, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_list, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_1d, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_list, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
# These mix representations aren't allowed
assert_raises(ValueError, metric, y1_1d, y2_row)
assert_raises(ValueError, metric, y1_row, y2_1d)
assert_raises(ValueError, metric, y1_list, y2_row)
assert_raises(ValueError, metric, y1_row, y2_list)
assert_raises(ValueError, metric, y1_column, y2_row)
assert_raises(ValueError, metric, y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTILABELS_METRICS)):
assert_raises(ValueError, metric, y1_row, y2_row)
@ignore_warnings
def test_invariance_string_vs_numbers_labels():
    # Ensure that classification metrics are invariant to string vs. number
    # labels
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
for name, metric in CLASSIFICATION_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
measure_with_number = metric(y1, y2)
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number invariance "
"test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string vs number "
"invariance test".format(name))
for name, metric in THRESHOLDED_METRICS.items():
if name in ("log_loss", "hinge_loss", "unnormalized_log_loss",
"brier_score_loss"):
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_number = metric(y1, y2)
measure_with_str = metric_str(y1_str, y2)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric(y1_str.astype('O'), y2)
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
else:
            # TODO: these metrics don't support string labels yet
assert_raises(ValueError, metric, y1_str, y2)
assert_raises(ValueError, metric, y1_str.astype('O'), y2)
@ignore_warnings
def check_single_sample(name):
# Non-regression test: scores should work with a single sample.
# This is important for leave-one-out cross validation.
# Score functions tested are those that formerly called np.squeeze,
# which turns an array of size 1 into a 0-d array (!).
metric = ALL_METRICS[name]
# assert that no exception is thrown
for i, j in product([0, 1], repeat=2):
metric([i], [j])
@ignore_warnings
def check_single_sample_multioutput(name):
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
def test_single_sample():
for name in ALL_METRICS:
if (name in METRIC_UNDEFINED_BINARY_MULTICLASS or
name in THRESHOLDED_METRICS):
# Those metrics are not always defined with one sample
# or in multiclass classification
continue
yield check_single_sample, name
for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS:
yield check_single_sample_multioutput, name
def test_multioutput_number_of_output_differ():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_raises(ValueError, metric, y_true, y_pred)
def test_multioutput_regression_invariance_to_dimension_shuffling():
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling "
"invariant" % name)
@ignore_warnings
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
_, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=0, n_samples=n_samples,
allow_unlabeled=True)
_, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=1, n_samples=n_samples,
allow_unlabeled=True)
# To make sure at least one empty label is present
y1 += [0]*n_classes
y2 += [0]*n_classes
y1_sparse_indicator = sp.coo_matrix(y1)
y2_sparse_indicator = sp.coo_matrix(y2)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = 'tmp'
metric.__name__ = name
measure = metric(y1, y2)
# Check representation invariance
assert_almost_equal(metric(y1_sparse_indicator,
y2_sparse_indicator),
measure,
err_msg="%s failed representation invariance "
"between dense and sparse indicator "
"formats." % name)
def test_raise_value_error_multilabel_sequences():
# make sure the multilabel-sequence format raises ValueError
multilabel_sequences = [
[[0, 1]],
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
[[]],
[()],
np.array([[], [1, 2]], dtype='object')]
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
for seq in multilabel_sequences:
assert_raises(ValueError, metric, seq, seq)
def test_normalize_option_binary_classification(n_samples=20):
# Test in the binary case
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multiclasss_classification():
# Test in the multiclass case
random_state = check_random_state(0)
y_true = random_state.randint(0, 4, size=(20, ))
y_pred = random_state.randint(0, 4, size=(20, ))
n_samples = y_true.shape[0]
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multilabel_classification():
# Test in the multilabel case
n_classes = 4
n_samples = 100
    # for both random_state 0 and 1, y_true and y_pred have at least one
    # unlabelled entry
_, y_true = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=0,
allow_unlabeled=True,
n_samples=n_samples)
_, y_pred = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=1,
allow_unlabeled=True,
n_samples=n_samples)
# To make sure at least one empty label is present
y_true += [0]*n_classes
y_pred += [0]*n_classes
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
is_multilabel):
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_array_almost_equal(label_measure,
[metric(y_true_binarize[:, i],
y_pred_binarize[:, i])
for i in range(n_classes)])
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(),
y_pred_binarize.ravel()))
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_almost_equal(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, np.average(label_measure,
weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_almost_equal(sample_measure,
np.mean([metric(y_true_binarize[i],
y_pred_binarize[i])
for i in range(n_samples)]))
assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
y_score):
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_score, y_true_binarize,
y_score, is_multilabel)
else:
raise ValueError("Metric is not recorded as having an average option")
def test_averaging_multiclass(n_samples=50, n_classes=3):
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel(n_classes=5, n_samples=40):
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=5, n_samples=n_samples,
allow_unlabeled=False)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel_all_zeroes():
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
# Test _average_binary_score for weight.sum() == 0
binary_metric = (lambda y_true, y_score, average="macro":
_average_binary_score(
precision_score, y_true, y_score, average))
_check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel=True)
def test_averaging_multilabel_all_ones():
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
# check that unit weights gives the same score as no weight
unweighted_score = metric(y1, y2, sample_weight=None)
assert_almost_equal(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to "
"sample_weight=ones" % name)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
assert_not_equal(
unweighted_score, weighted_score,
msg="Unweighted and weighted scores are unexpectedly "
"equal (%f) for %s" % (weighted_score, name))
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2,
sample_weight=sample_weight.tolist())
assert_almost_equal(
weighted_score, weighted_score_list,
err_msg=("Weighted scores for array and list "
"sample_weight input are not equal (%f != %f) for %s") % (
weighted_score, weighted_score_list, name))
    # check that integer sample_weight gives the same score as repeated samples
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0), sample_weight=None)
assert_almost_equal(
weighted_score, repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(y1_subset, y2_subset,
sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y1, y2,
sample_weight=sample_weight_zeroed)
assert_almost_equal(
weighted_score_subset, weighted_score_zeroed,
err_msg=("Zeroing weights does not give the same result as "
"removing the corresponding samples (%f != %f) for %s" %
(weighted_score_zeroed, weighted_score_subset, name)))
if not name.startswith('unnormalized'):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_almost_equal(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant "
"under scaling" % name)
    # Check that if sample_weight.shape[0] != y_true.shape[0], an error is
    # raised
assert_raises(Exception, metric, y1, y2,
sample_weight=np.hstack([sample_weight, sample_weight]))
def test_sample_weight_invariance(n_samples=50):
random_state = check_random_state(0)
# binary
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_BINARY):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multiclass
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples, ))
y_pred = random_state.randint(0, 5, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples, 5))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_BINARY_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multilabel indicator
_, ya = make_multilabel_classification(n_features=1, n_classes=20,
random_state=0, n_samples=100,
allow_unlabeled=False)
_, yb = make_multilabel_classification(n_features=1, n_classes=20,
random_state=1, n_samples=100,
allow_unlabeled=False)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTIOUTPUT_METRICS):
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (check_sample_weight_invariance, name, metric, y_true,
y_score)
else:
yield (check_sample_weight_invariance, name, metric, y_true,
y_pred)
def test_no_averaging_labels():
# test labels argument when not using averaging
# in multi-class and multi-label cases
y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
y_true_multiclass = np.array([0, 1, 2])
y_pred_multiclass = np.array([0, 2, 3])
labels = np.array([3, 0, 1, 2])
_, inverse_labels = np.unique(labels, return_inverse=True)
for name in METRICS_WITH_AVERAGING:
for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],
[y_true_multilabel, y_pred_multilabel]]:
if name not in MULTILABELS_METRICS and y_pred.shape[1] > 0:
continue
metric = ALL_METRICS[name]
score_labels = metric(y_true, y_pred, labels=labels, average=None)
score = metric(y_true, y_pred, average=None)
assert_array_equal(score_labels, score[inverse_labels])
|
bsd-3-clause
|
DonBeo/scikit-learn
|
sklearn/qda.py
|
21
|
7639
|
"""
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .externals.six.moves import xrange
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
__all__ = ['QDA']
class QDA(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis (QDA)
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
Examples
--------
>>> from sklearn.qda import QDA
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QDA()
>>> clf.fit(X, y)
QDA(priors=None, reg_param=0.0)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.lda.LDA: Linear discriminant analysis
"""
def __init__(self, priors=None, reg_param=0.):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
def fit(self, X, y, store_covariances=False, tol=1.0e-4):
"""
Fit the QDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
"""
X, y = check_X_y(X, y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
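# Descriptive note (added comment): for each class k, _decision_function above
# evaluates the log posterior up to an additive constant,
#     -0.5 * ((x - mu_k)^T Sigma_k^{-1} (x - mu_k) + log|Sigma_k|) + log(pi_k),
# where the Mahalanobis term is computed through the stored rotations
# (eigenvectors) and scalings (regularized eigenvalues) of each class
# covariance, and predict() returns the class with the largest value.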
|
bsd-3-clause
|
tejasckulkarni/hydrology
|
ch_616/ch_616_stage_vol.py
|
2
|
8192
|
__author__ = 'kiruba'
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import itertools
from spread import spread
from bisect import bisect_left, bisect_right
from matplotlib import rc
from scipy.interpolate import griddata
from matplotlib import cm
from matplotlib.path import *
from mpl_toolkits.mplot3d import axes3d, Axes3D
import matplotlib as mpl
# latex parameters
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
rc('text', usetex=True)
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=18)
base_file = '/media/kiruba/New Volume/milli_watershed/stream_profile/616/base_profile_616.csv'
df_base = pd.read_csv(base_file, header=-1, skiprows=1)
# print df_base.head()
slope_file = '/media/kiruba/New Volume/milli_watershed/stream_profile/616/slope_616.csv'
df_slope = pd.read_csv(slope_file, header=0)
# print df_slope
df_base_trans = df_base.T
df_base_trans.columns = df_base_trans.ix[0, 0:]
# print df_base_trans
df_base_trans = df_base_trans.ix[1:, 1500:]
# print df_base_trans
# raise SystemExit(0)
"""
Filling of profile
"""
def find_range(array, ab):
if ab < max(array):
start = bisect_left(array, ab)
return array[start-1]
else:
return max(array)
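# Note (added comment): for ab below the maximum of the array, find_range
# returns the largest element strictly less than ab; otherwise it returns
# max(array). For example, find_range([0, 5, 10], 7) gives bisect_left == 2
# and returns array[1] == 5.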
def fill_profile(base_df, slope_df, midpoint_index):
"""
:param base_df: base profile
:param slope_df: slope profile
:param midpoint_index: index of midpoint(x=0)
    :return: the base profile joined with the filled-in cross-sections
"""
base_z = base_df.ix[midpoint_index, 0:]
slope_z = slope_df.ix[ :, 1]
base_y = base_z.index
# print base_z
# base_y_list =base_y.tolist()
slope_y = slope_df.ix[:, 0]
slope_z.index = slope_y
# print slope_z.head()
# print base_z
new_base_df = base_df
for y_s in slope_z.index:
if y_s not in base_z.index.tolist():
# print y_s
y_t = find_range(base_y, y_s)
template = base_df[y_t]
z1 = template.ix[midpoint_index, ]
# print z1
z2 = slope_z[y_s]
diff = z2 - z1
# print template
# print diff
profile = template + diff
profile.name = y_s
# profile.loc[0] = y_s
# profile = profile.sort_index()
# print profile
# no_of_col = len(base_df.columns)
new_base_df = new_base_df.join(profile, how='right')
# base_df.columns.values[no_of_col+1] = y_s
return new_base_df
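# Note (added comment): for every slope station y_s without a measured
# cross-section, fill_profile picks the nearest measured cross-section below
# it (via find_range), shifts that template vertically so its elevation at the
# midpoint row matches the slope profile at y_s, and joins the shifted copy as
# a new column of the profile.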
def set_column_sequence(dataframe, seq):
'''Takes a dataframe and a subsequence of its columns, returns dataframe with seq as first columns'''
cols = seq[:] # copy so we don't mutate seq
for x in dataframe.columns:
if x not in cols:
cols.append(x)
return dataframe[cols]
created_profile = fill_profile(df_base_trans, df_slope, 7)
# created_profile = created_profile[sorted(created_profile.columns)]
# print created_profile.head()
sorted_df = created_profile.iloc[0:, 1:]
sorted_df = sorted_df[sorted(sorted_df.columns)]
sorted_df = sorted_df.join(created_profile.iloc[0:, 0], how='right')
created_profile = set_column_sequence(sorted_df, [1500])
# print created_profile.head()
# raise SystemExit(0)
"""
Create (x,y,z) point cloud
"""
z_array = created_profile.iloc[0:, 1:]
columns = z_array.columns
z_array = z_array.values
index = created_profile.iloc[0:,0]
df = pd.DataFrame(z_array, columns=columns).set_index(index)
data_1 = []
for y, row in df.iteritems():
for x, z in row.iteritems():
data_1.append((x, y, z))
data_1_df = pd.DataFrame(data_1, columns=['x', 'y', 'z'])
print data_1_df.head()
# raise SystemExit(0)
X = data_1_df.x
Y = data_1_df.y
Z = data_1_df.z
## contour and 3d surface plotting
fig = plt.figure(figsize=plt.figaspect(0.5))
ax = fig.gca(projection='3d')
# ax = fig.add_subplot(1, 2, 1, projection='3d')
xi = np.linspace(X.min(), X.max(), 100)
yi = np.linspace(Y.min(), Y.max(), 100)
# print len(xi)
# print len(yi)
# print len(Z)
zi = griddata((X, Y), Z, (xi[None, :], yi[:, None]), method='linear') # create a uniform spaced grid
xig, yig = np.meshgrid(xi, yi)
surf = ax.plot_surface(xig, yig, zi, rstride=5, cstride=3, linewidth=0, cmap=cm.coolwarm, antialiased=False) # 3d plot
# inter_1 = []
# inter_1.append((xi, yi, zi))
# inter = pd.DataFrame(inter_1, columns=['x', 'y', 'z'])
# inter.to_csv('/media/kiruba/New Volume/r/r_dir/stream_profile/new_code/591/inter.csv') # interpolation data output
fig.colorbar(surf, shrink=0.5, aspect=5)
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
rc('text', usetex=True)
# plt.rc('text', usetex=True)
# plt.rc('font', family='serif')
# plt.xlabel(r'\textbf{X} (m)')
# plt.ylabel(r'\textbf{Y} (m)')
# plt.title(r"Profile for 591", fontsize=16)
plt.gca().invert_xaxis() # reverses x axis
# # ax = fig
# plt.savefig('/media/kiruba/New Volume/r/r_dir/stream_profile/new_code/591/linear_interpolation')
plt.show()
raise SystemExit(0)
# ## trace contours
# Refer: Nikolai Shokhirev http://www.numericalexpert.com/blog/area_calculation/
check_dam_height = 1.5 #metre
levels = [0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6] #, 3.93]
plt.figure(figsize=(11.69, 8.27))
CS = plt.contourf(xi, yi, zi, len(levels), alpha=.75, cmap=cm.hot, levels=levels)
C = plt.contour(xi, yi, zi, len(levels), colors='black', linewidth=.5, levels=levels)
plt.clabel(C, inline=1, fontsize=10)
plt.colorbar(CS, shrink=0.5, aspect=5)
plt.yticks(np.arange(0,40, 5))
plt.xticks(np.arange(-6,6, 2))
plt.grid()
plt.gca().invert_xaxis()
plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_616/cont_2d')
plt.show()
# for i in range(len(CS.collections)):
# print CS.levels[i]
#
# for i in range(len(C.collections)):
# print(C.levels[i])
def contour_area(mpl_obj):
"""
Returns a array of contour levels and
corresponding cumulative area of contours
:param mpl_obj: Matplotlib contour object
:return: [(level1, area1), (level1, area1+area2)]
"""
#Refer: Nikolai Shokhirev http://www.numericalexpert.com/blog/area_calculation/
n_c = len(mpl_obj.collections) # n_c = no of contours
print 'No. of contours = %s' % n_c
area = 0.0000
cont_area_array = []
for contour in range(n_c):
# area = 0
n_p = len(mpl_obj.collections[contour].get_paths())
zc = mpl_obj.levels[contour]
for path in range(n_p):
p = mpl_obj.collections[contour].get_paths()[path]
v = p.vertices
l = len(v)
s = 0.0000
for i in range(l):
j = (i+1) % l
s += (v[j, 0] - v[i, 0]) * (v[j, 1] + v[i, 1])
poly_area = 0.5*abs(s)
area += poly_area
cont_area_array.append((zc, area))
return cont_area_array
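# Added sanity check (not part of the original script): the polygon-area loop in
# contour_area() is the shoelace formula, so a unit square should give 1.0.
# Names prefixed with '_' are illustrative only.
_square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
_s = 0.0
for _i in range(len(_square)):
    _j = (_i + 1) % len(_square)
    _s += (_square[_j][0] - _square[_i][0]) * (_square[_j][1] + _square[_i][1])
assert abs(0.5 * abs(_s) - 1.0) < 1e-9  # shoelace area of the unit square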
# contour_area(C)
contour_a = contour_area(CS)
cont_area_df = pd.DataFrame(contour_a, columns=['Z', 'Area'])
plt.plot(cont_area_df['Z'], cont_area_df['Area'])
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.ylabel(r'\textbf{Area} ($m^2$)')
plt.xlabel(r'\textbf{Stage} (m)')
plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_616/cont_area_616')
# plt.show()
cont_area_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/ch_616/cont_area.csv')
## Curve fitting
# fig = plt.figure(figsize=(11.69, 8.27))
y = cont_area_df['Area']
x = cont_area_df['Z']
# fit a 1st-degree (linear) polynomial to the stage-area relation
po = np.polyfit(x, y, 1)
f = np.poly1d(po)
print po
print np.poly1d(f)
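# Added cross-check (illustrative only; 'a_check' and 'b_check' are new names,
# not part of the original script): for a degree-1 fit, the least-squares slope
# and intercept have the closed form below and should match po[0] and po[1].
a_check = ((x * y).mean() - x.mean() * y.mean()) / ((x ** 2).mean() - x.mean() ** 2)
b_check = y.mean() - a_check * x.mean()
print 'closed-form check: slope = %.4f, intercept = %.4f' % (a_check, b_check)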
#calculate new x, y
x_new = np.linspace(min(x), max(x), 50)
y_new = f(x_new)
fig = plt.figure(figsize=(11.69, 8.27))
plt.plot(x, y, 'o', x_new, y_new)
plt.xlim([(min(x))-1, (max(x))+1])
plt.xlabel(r'\textbf{Stage} (m)')
plt.ylabel(r'\textbf{Area} ($m^2$)')
plt.text(-0.8, 500, r"$y = {0:.2f}x {1:+.2f}$".format(po[0], po[1]))
plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_616/poly_2_deg_616')
plt.show()
created_profile.iloc[0] = created_profile.columns
print created_profile
created_profile.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/ch_616/created_profile_616.csv')
|
gpl-3.0
|
0x0all/scikit-learn
|
examples/decomposition/plot_kernel_pca.py
|
353
|
2011
|
"""
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
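# The RBF kernel used above is k(a, b) = exp(-gamma * ||a - b||^2); with
# gamma=10, points on the same ring stay close in the induced feature space,
# which is why the two circles become linearly separable along the first
# kernel principal component.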
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
|
bsd-3-clause
|