repo_name | path | copies | size | content | license
---|---|---|---|---|---
flightgong/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
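# Active-learning loop: each pass fits LabelSpreading on the currently
# labeled points, ranks the unlabeled points by the entropy of their
# predicted label distribution (higher entropy = more uncertain), and then
# adds the five most uncertain points to the labeled set with their true
# labels before the next pass.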
for i in range(5):
    y_train = np.copy(y)
    y_train[unlabeled_indices] = -1
    lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
    lp_model.fit(X, y_train)
    predicted_labels = lp_model.transduction_[unlabeled_indices]
    true_labels = y[unlabeled_indices]
    cm = confusion_matrix(true_labels, predicted_labels,
                          labels=lp_model.classes_)
    print('Iteration %i %s' % (i, 70 * '_'))
    print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
          % (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
    print(classification_report(true_labels, predicted_labels))
    print("Confusion matrix")
    print(cm)
    # compute the entropies of transduced label distributions
    pred_entropies = stats.distributions.entropy(
        lp_model.label_distributions_.T)
    # select five digit examples that the classifier is most uncertain about
    uncertainty_index = np.argsort(pred_entropies)[-5:]
    # keep track of indices that we get labels for
    delete_indices = np.array([])
    f.text(.05, (1 - (i + 1) * .183),
           "model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
    for index, image_index in enumerate(uncertainty_index):
        image = images[image_index]
        sub = f.add_subplot(5, 5, index + 1 + (5 * i))
        sub.imshow(image, cmap=plt.cm.gray_r)
        sub.set_title('predict: %i\ntrue: %i' % (
            lp_model.transduction_[image_index], y[image_index]), size=10)
        sub.axis('off')
        # labeling 5 points: remove them from the unlabeled set
        delete_index, = np.where(unlabeled_indices == image_index)
        delete_indices = np.concatenate((delete_indices, delete_index))
    unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
    n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
           "uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
rexshihaoren/scikit-learn | examples/neighbors/plot_regression.py | 349 | 1402 | """
============================
Nearest Neighbors regression
============================
Demonstrates the resolution of a regression problem
using k-Nearest Neighbors regression and the interpolation of the
target using both barycenter (distance) and constant (uniform) weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
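# every 5th of the 40 training targets (8 samples) is perturbed by uniform
# noise drawn from (-0.5, 0.5]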
###############################################################################
# Fit regression model
n_neighbors = 5
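# 'uniform' averages the k neighbors equally, while 'distance' weights each
# neighbor by the inverse of its distance to the query point (the
# "barycenter" interpolation mentioned in the docstring).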
for i, weights in enumerate(['uniform', 'distance']):
    knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
    y_ = knn.fit(X, y).predict(T)
    plt.subplot(2, 1, i + 1)
    plt.scatter(X, y, c='k', label='data')
    plt.plot(T, y_, c='g', label='prediction')
    plt.axis('tight')
    plt.legend()
    plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
                                                                weights))
plt.show()
| bsd-3-clause |
kazemakase/scikit-learn | sklearn/utils/tests/test_testing.py | 144 | 4121 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as scikit-learn's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
# Check that the nose implementation of assert_greater gives the
# same thing as scikit-learn's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LDA()
tree = DecisionTreeClassifier()
# LDA doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
# `clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
gclenaghan/scikit-learn | examples/model_selection/plot_roc_crossval.py | 37 | 3474 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:func:`sklearn.model_selection.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(n_splits=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
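# Each fold's TPR is interpolated onto this common FPR grid so that the
# per-fold ROC curves can be averaged point-wise into a mean curve.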
all_tpr = []
colors = cycle(['cyan', 'indigo', 'seagreen', 'yellow', 'blue', 'darkorange'])
lw = 2
i = 0
for (train, test), color in zip(cv.split(X, y), colors):
    probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
    # Compute ROC curve and area under the curve
    fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
    mean_tpr += interp(mean_fpr, fpr, tpr)
    mean_tpr[0] = 0.0
    roc_auc = auc(fpr, tpr)
    plt.plot(fpr, tpr, lw=lw, color=color,
             label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
    i += 1
plt.plot([0, 1], [0, 1], linestyle='--', lw=lw, color='k',
         label='Luck')
mean_tpr /= cv.get_n_splits(X, y)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, color='g', linestyle='--',
         label='Mean ROC (area = %0.2f)' % mean_auc, lw=lw)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
hitszxp/scikit-learn | sklearn/ensemble/tests/test_bagging.py | 20 | 21431 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.grid_search import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_boston, load_iris
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
"""Check classification for various parameter settings."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
"""Check classification for various parameter settings on sparse input."""
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
def test_regression():
"""Check regression for various parameter settings."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
"""Check regression for various parameter settings on sparse input."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
"""Test that bootstraping samples generate non-perfect base estimators."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
def test_bootstrap_features():
"""Test that bootstraping features may generate dupplicate features."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
"""Predict probabilities."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
"""Check that oob prediction is a good estimation of the generalization
error."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
"""Check that oob prediction is a good estimation of the generalization
error."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
"""Check singleton ensembles."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
"""Test that it gives proper exception on deficient input."""
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_raises(NotImplementedError,
BaggingClassifier(base).fit(X, y).decision_function, X)
def test_parallel_classification():
"""Check parallel classification."""
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
"""Check parallel regression."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
"""Check that bagging ensembles can be grid-searched."""
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
"""Check base_estimator and its default values."""
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause |
npuichigo/ttsflow | third_party/tensorflow/tensorflow/python/estimator/canned/linear_testing_utils.py | 14 | 66739 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for testing linear estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import run_config
from tensorflow.python.estimator.canned import linear
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import optimizer
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver
from tensorflow.python.training import session_run_hook
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
# pylint rules which are disabled by default for test files.
# pylint: disable=invalid-name,protected-access,missing-docstring
# Names of variables created by model.
AGE_WEIGHT_NAME = 'linear/linear_model/age/weights'
HEIGHT_WEIGHT_NAME = 'linear/linear_model/height/weights'
BIAS_NAME = 'linear/linear_model/bias_weights'
LANGUAGE_WEIGHT_NAME = 'linear/linear_model/language/weights'
def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
expected = ops.convert_to_tensor(expected, name='expected')
actual = ops.convert_to_tensor(actual, name='actual')
rdiff = math_ops.abs(expected - actual, 'diff') / math_ops.abs(expected)
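# rdiff is the element-wise relative error |expected - actual| / |expected|,
# which is asserted to be below rtol.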
rtol = ops.convert_to_tensor(rtol, name='rtol')
return check_ops.assert_less(
rdiff,
rtol,
data=('Condition expected =~ actual did not hold element-wise:'
'expected = ', expected, 'actual = ', actual, 'rdiff = ', rdiff,
'rtol = ', rtol,),
name=scope)
def save_variables_to_ckpt(model_dir):
init_all_op = [variables.global_variables_initializer()]
with tf_session.Session() as sess:
sess.run(init_all_op)
saver.Saver().save(sess, os.path.join(model_dir, 'model.ckpt'))
def queue_parsed_features(feature_map):
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
def sorted_key_dict(unsorted_dict):
return {k: unsorted_dict[k] for k in sorted(unsorted_dict)}
def sigmoid(x):
return 1 / (1 + np.exp(-1.0 * x))
class CheckPartitionerVarHook(session_run_hook.SessionRunHook):
"""A `SessionRunHook` to check a partitioned variable."""
def __init__(self, test_case, var_name, var_dim, partitions):
self._test_case = test_case
self._var_name = var_name
self._var_dim = var_dim
self._partitions = partitions
def begin(self):
with variable_scope.variable_scope(
variable_scope.get_variable_scope()) as scope:
scope.reuse_variables()
partitioned_weight = variable_scope.get_variable(
self._var_name, shape=(self._var_dim, 1))
self._test_case.assertTrue(
isinstance(partitioned_weight, variables.PartitionedVariable))
for part in partitioned_weight:
self._test_case.assertEqual(self._var_dim // self._partitions,
part.get_shape()[0])
class BaseLinearRegressorPartitionerTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def testPartitioner(self):
x_dim = 64
partitions = 4
def _partitioner(shape, dtype):
del dtype # unused; required by Fn signature.
# Only partition the embedding tensor.
return [partitions, 1] if shape[0] == x_dim else [1]
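# e.g. with x_dim=64 and partitions=4 the (64, 1) weight variable is split
# along its first axis into four (16, 1) shards, which is what
# CheckPartitionerVarHook verifies.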
regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.categorical_column_with_hash_bucket(
'language', hash_bucket_size=x_dim),),
partitioner=_partitioner,
model_dir=self._model_dir)
def _input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english', 'spanish'],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}, [[10.]]
hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim,
partitions)
regressor.train(input_fn=_input_fn, steps=1, hooks=[hook])
def testDefaultPartitionerWithMultiplePsReplicas(self):
partitions = 2
# This results in weights larger than the default partition size of 64M,
# so partitioned weights are created (each weight uses 4 bytes).
x_dim = 32 << 20
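# 32 << 20 = 33,554,432 weights * 4 bytes = 128MB, i.e. roughly two 64MB
# partitions, matching the expected `partitions = 2` above.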
class FakeRunConfig(run_config.RunConfig):
@property
def num_ps_replicas(self):
return partitions
# Mock the device setter as ps is not available on test machines.
with test.mock.patch.object(
estimator,
'_get_replica_device_setter',
return_value=lambda _: '/cpu:0'):
linear_regressor = self._linear_regressor_fn(
feature_columns=(
feature_column_lib.categorical_column_with_hash_bucket(
'language', hash_bucket_size=x_dim),),
config=FakeRunConfig(),
model_dir=self._model_dir)
def _input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english', 'spanish'],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}, [[10.]]
hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim,
partitions)
linear_regressor.train(input_fn=_input_fn, steps=1, hooks=[hook])
# TODO(b/36813849): Add tests with dynamic shape inputs using placeholders.
class BaseLinearRegressorEvaluationTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_evaluation_for_simple_data(self):
with ops.Graph().as_default():
variables.Variable([[11.0]], name=AGE_WEIGHT_NAME)
variables.Variable([2.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(
input_fn=lambda: ({'age': ((1,),)}, ((10.,),)), steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10. Loss is 3**2 = 9.
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 9.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_batch(self):
"""Tests evaluation for batch_size==2."""
with ops.Graph().as_default():
variables.Variable([[11.0]], name=AGE_WEIGHT_NAME)
variables.Variable([2.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(
input_fn=lambda: ({'age': ((1,), (1,))}, ((10.,), (10.,))), steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the sum over batch = 9 + 9 = 18
# Average loss is the average over batch = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 18.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_weights(self):
"""Tests evaluation with weights."""
with ops.Graph().as_default():
variables.Variable([[11.0]], name=AGE_WEIGHT_NAME)
variables.Variable([2.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
def _input_fn():
features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}
labels = ((10.,), (10.,))
return features, labels
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
weight_column='weights',
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(input_fn=_input_fn, steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the weighted sum over batch = 9 + 2*9 = 27
# Average loss is the weighted average = (9 + 2*9) / (1 + 2) = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 27.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_for_multi_dimensions(self):
x_dim = 3
label_dim = 2
with ops.Graph().as_default():
variables.Variable(
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name=AGE_WEIGHT_NAME)
variables.Variable([7.0, 8.0], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column(
'age', shape=(x_dim,)),),
label_dimension=label_dim,
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={
'age': np.array([[2., 4., 5.]]),
},
y=np.array([[46., 58.]]),
batch_size=1,
num_epochs=None,
shuffle=False)
eval_metrics = linear_regressor.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())
# Logit is [2., 4., 5.] * [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]] + [7.0, 8.0]
# = [39, 50] + [7.0, 8.0], which is [46, 58]
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
def test_evaluation_for_multiple_feature_columns(self):
with ops.Graph().as_default():
variables.Variable([[10.0]], name=AGE_WEIGHT_NAME)
variables.Variable([[2.0]], name=HEIGHT_WEIGHT_NAME)
variables.Variable([5.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
batch_size = 2
feature_columns = [
feature_column_lib.numeric_column('age'),
feature_column_lib.numeric_column('height')
]
input_fn = numpy_io.numpy_input_fn(
x={'age': np.array([20, 40]),
'height': np.array([4, 8])},
y=np.array([[213.], [421.]]),
batch_size=batch_size,
num_epochs=None,
shuffle=False)
est = self._linear_regressor_fn(
feature_columns=feature_columns, model_dir=self._model_dir)
eval_metrics = est.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())
# Logit is [(20. * 10.0 + 4 * 2.0 + 5.0), (40. * 10.0 + 8 * 2.0 + 5.0)] =
# [213.0, 421.0], while label is [213., 421.]. Loss = 0.
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
class BaseLinearRegressorPredictTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_1d(self):
"""Tests predict when all variables are one-dimensional."""
with ops.Graph().as_default():
variables.Variable([[10.]], name='linear/linear_model/x/weights')
variables.Variable([.2], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('x'),),
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[2.]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = linear_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# x * weight + bias = 2. * 10. + .2 = 20.2
self.assertAllClose([[20.2]], predicted_scores)
def testMultiDim(self):
"""Tests predict when all variables are multi-dimenstional."""
batch_size = 2
label_dimension = 3
x_dim = 4
feature_columns = (feature_column_lib.numeric_column('x', shape=(x_dim,)),)
with ops.Graph().as_default():
variables.Variable( # shape=[x_dim, label_dimension]
[[1., 2., 3.], [2., 3., 4.], [3., 4., 5.], [4., 5., 6.]],
name='linear/linear_model/x/weights')
variables.Variable( # shape=[label_dimension]
[.2, .4, .6], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
# x shape=[batch_size, x_dim]
x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predictions = linear_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# score = x * weight + bias, shape=[batch_size, label_dimension]
self.assertAllClose([[30.2, 40.4, 50.6], [70.2, 96.4, 122.6]],
predicted_scores)
def testTwoFeatureColumns(self):
"""Tests predict with two feature columns."""
with ops.Graph().as_default():
variables.Variable([[10.]], name='linear/linear_model/x0/weights')
variables.Variable([[20.]], name='linear/linear_model/x1/weights')
variables.Variable([.2], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1')),
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x0': np.array([[2.]]),
'x1': np.array([[3.]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = linear_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# x0 * weight0 + x1 * weight1 + bias = 2. * 10. + 3. * 20 + .2 = 80.2
self.assertAllClose([[80.2]], predicted_scores)
class BaseLinearRegressorIntegrationTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, label_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['predictions'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
# Pandas DataFrame naturally supports 1 dim data only.
label_dimension = 1
input_dimension = label_dimension
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum[:label_dimension])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
class BaseLinearRegressorTrainingTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s/part_0:0' % AGE_WEIGHT_NAME,
'%s/part_0:0' % BIAS_NAME
]
def _minimize(loss, global_step=None, var_list=None):
trainable_vars = var_list or ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
if global_step is not None:
return state_ops.assign_add(global_step, 1).op
return control_flow_ops.no_op()
assert_loss = assert_close(
math_ops.to_float(expected_loss, name='expected'),
loss,
name='assert_loss')
with ops.control_dependencies((assert_loss,)):
if global_step is not None:
return state_ops.assign_add(global_step, 1).op
return control_flow_ops.no_op()
mock_optimizer = test.mock.NonCallableMock(
spec=optimizer.Optimizer,
wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(self,
expected_global_step,
expected_age_weight=None,
expected_bias=None):
shapes = {
name: shape
for (name, shape) in checkpoint_utils.list_variables(self._model_dir)
}
self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
self.assertEqual(expected_global_step,
checkpoint_utils.load_variable(self._model_dir,
ops.GraphKeys.GLOBAL_STEP))
self.assertEqual([1, 1], shapes[AGE_WEIGHT_NAME])
if expected_age_weight is not None:
self.assertEqual(expected_age_weight,
checkpoint_utils.load_variable(self._model_dir,
AGE_WEIGHT_NAME))
self.assertEqual([1], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertEqual(expected_bias,
checkpoint_utils.load_variable(self._model_dir,
BIAS_NAME))
def testFromScratchWithDefaultOptimizer(self):
# Create LinearRegressor.
label = 5.
age = 17
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self._assert_checkpoint(num_steps)
def testTrainWithOneDimLabel(self):
label_dimension = 1
batch_size = 20
feature_columns = [feature_column_lib.numeric_column('age', shape=(1,))]
est = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(200)
def testTrainWithOneDimWeight(self):
label_dimension = 1
batch_size = 20
feature_columns = [feature_column_lib.numeric_column('age', shape=(1,))]
est = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
weight_column='w',
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1,
'w': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(200)
def testFromScratch(self):
# Create LinearRegressor.
label = 5.
age = 17
# loss = (logits - label)^2 = (0 - 5.)^2 = 25.
mock_optimizer = self._mock_optimizer(expected_loss=25.)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
expected_global_step=num_steps,
expected_age_weight=0.,
expected_bias=0.)
def testFromCheckpoint(self):
# Create initial checkpoint.
age_weight = 10.0
bias = 5.0
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable([[age_weight]], name=AGE_WEIGHT_NAME)
variables.Variable([bias], name=BIAS_NAME)
variables.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = age * age_weight + bias = 17 * 10. + 5. = 175
# loss = (logits - label)^2 = (175 - 5)^2 = 28900
mock_optimizer = self._mock_optimizer(expected_loss=28900.)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({'age': ((17,),)}, ((5.,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
def testFromCheckpointMultiBatch(self):
# Create initial checkpoint.
age_weight = 10.0
bias = 5.0
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable([[age_weight]], name=AGE_WEIGHT_NAME)
variables.Variable([bias], name=BIAS_NAME)
variables.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = age * age_weight + bias
# logits[0] = 17 * 10. + 5. = 175
# logits[1] = 15 * 10. + 5. = 155
# loss = sum(logits - label)^2 = (175 - 5)^2 + (155 - 3)^2 = 52004
mock_optimizer = self._mock_optimizer(expected_loss=52004.)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({'age': ((17,), (15,))}, ((5.,), (3.,))),
steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
class BaseLinearClassifierTrainingTest(object):
def __init__(self, linear_classifier_fn):
self._linear_classifier_fn = linear_classifier_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s/part_0:0' % AGE_WEIGHT_NAME,
'%s/part_0:0' % BIAS_NAME
]
def _minimize(loss, global_step):
trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(
expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
return state_ops.assign_add(global_step, 1).op
assert_loss = assert_close(
math_ops.to_float(expected_loss, name='expected'),
loss,
name='assert_loss')
with ops.control_dependencies((assert_loss,)):
return state_ops.assign_add(global_step, 1).op
mock_optimizer = test.mock.NonCallableMock(
spec=optimizer.Optimizer,
wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(
self, n_classes, expected_global_step, expected_age_weight=None,
expected_bias=None):
logits_dimension = n_classes if n_classes > 2 else 1
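# Binary classification uses a single (sigmoid) logit; n_classes > 2 uses one
# logit per class.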
shapes = {
name: shape for (name, shape) in
checkpoint_utils.list_variables(self._model_dir)
}
self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
self.assertEqual(
expected_global_step,
checkpoint_utils.load_variable(
self._model_dir, ops.GraphKeys.GLOBAL_STEP))
self.assertEqual([1, logits_dimension],
shapes[AGE_WEIGHT_NAME])
if expected_age_weight is not None:
self.assertAllEqual(expected_age_weight,
checkpoint_utils.load_variable(
self._model_dir,
AGE_WEIGHT_NAME))
self.assertEqual([logits_dimension], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertAllEqual(expected_bias,
checkpoint_utils.load_variable(
self._model_dir, BIAS_NAME))
def _testFromScratchWithDefaultOptimizer(self, n_classes):
label = 0
age = 17
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self._assert_checkpoint(n_classes, num_steps)
def testBinaryClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=2)
def testMultiClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=4)
def _testTrainWithTwoDimsLabel(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_2,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=2)
def testMultiClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=4)
def _testTrainWithOneDimLabel(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=2)
def testMultiClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=4)
def _testTrainWithTwoDimsWeight(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1, 'w': data_rank_2}, y=data_rank_1,
batch_size=batch_size, num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=2)
def testMultiClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=4)
def _testTrainWithOneDimWeight(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1, 'w': data_rank_1}, y=data_rank_1,
batch_size=batch_size, num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=2)
def testMultiClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=4)
def _testFromScratch(self, n_classes):
label = 1
age = 17
    # For binary classifier:
# loss = sigmoid_cross_entropy(logits, label) where logits=0 (weights are
# all zero initially) and label = 1 so,
# loss = 1 * -log ( sigmoid(logits) ) = 0.69315
    # For multi-class classifier:
# loss = cross_entropy(logits, label) where logits are all 0s (weights are
# all zero initially) and label = 1 so,
# loss = 1 * -log ( 1.0 / n_classes )
    # For this particular test case, since the logits are all equal, the formula
    # 1 * -log ( 1.0 / n_classes ) covers both the binary and multi-class cases.
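    # Concretely, for the two configurations exercised below:
    #   n_classes=2: expected_loss = -log(1/2) ~= 0.6931
    #   n_classes=4: expected_loss = -log(1/4) ~= 1.3863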
mock_optimizer = self._mock_optimizer(
expected_loss=-1 * math.log(1.0/n_classes))
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=num_steps,
expected_age_weight=[[0.]] if n_classes == 2 else [[0.] * n_classes],
expected_bias=[0.] if n_classes == 2 else [.0] * n_classes)
def testBinaryClassesFromScratch(self):
self._testFromScratch(n_classes=2)
def testMultiClassesFromScratch(self):
self._testFromScratch(n_classes=4)
def _testFromCheckpoint(self, n_classes):
# Create initial checkpoint.
label = 1
age = 17
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (
np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
    # For binary classifier:
# logits = age * age_weight + bias = 17 * 2. - 35. = -1.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss = 1 * -log ( sigmoid(-1) ) = 1.3133
    # For multi-class classifier:
# loss = cross_entropy(logits, label)
# where logits = 17 * age_weight + bias and label = 1
# so, loss = 1 * -log ( soft_max(logits)[1] )
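    # For example, with n_classes=4 the checkpoint holds
    # age_weight = [[0., 2., 4., 6.]] and bias = [-35.] * 4, giving
    # logits = [-35., -1., 33., 67.] and an expected loss of roughly 68.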
if n_classes == 2:
expected_loss = 1.3133
else:
logits = age_weight * age + bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[0, label])
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
def testBinaryClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=2)
def testMultiClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=4)
def _testFromCheckpointFloatLabels(self, n_classes):
"""Tests float labels for binary classification."""
# Create initial checkpoint.
if n_classes > 2:
return
label = 0.8
age = 17
age_weight = [[2.0]]
bias = [-35.0]
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = age * age_weight + bias = 17 * 2. - 35. = -1.
# loss = sigmoid_cross_entropy(logits, label)
# => loss = -0.8 * log(sigmoid(-1)) -0.2 * log(sigmoid(+1)) = 1.1132617
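    # (i.e. 0.8 * 1.3133 + 0.2 * 0.3133, since -log(sigmoid(-1)) ~= 1.3133
    # and -log(sigmoid(+1)) ~= 0.3133)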
mock_optimizer = self._mock_optimizer(expected_loss=1.1132617)
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
def testBinaryClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=2)
def testMultiClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=4)
def _testFromCheckpointMultiBatch(self, n_classes):
# Create initial checkpoint.
label = [1, 0]
age = [17, 18.5]
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (
np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
    # For binary classifier:
# logits = age * age_weight + bias
# logits[0] = 17 * 2. - 35. = -1.
# logits[1] = 18.5 * 2. - 35. = 2.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss[0] = 1 * -log ( sigmoid(-1) ) = 1.3133
# loss[1] = (1 - 0) * -log ( 1- sigmoid(2) ) = 2.1269
    # For multi-class classifier:
# loss = cross_entropy(logits, label)
# where logits = [17, 18.5] * age_weight + bias and label = [1, 0]
# so, loss = 1 * -log ( soft_max(logits)[label] )
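    # For example, with n_classes=4: row 0 has logits [-35., -1., 33., 67.]
    # (label 1, loss ~= 68) and row 1 has logits [-35., 2., 39., 76.]
    # (label 0, loss ~= 111), so the summed loss is roughly 179.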
if n_classes == 2:
expected_loss = (1.3133 + 2.1269)
else:
logits = age_weight * np.reshape(age, (2, 1)) + bias
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': (age)}, (label)),
steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
def testBinaryClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=2)
def testMultiClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=4)
class BaseLinearClassifierEvaluationTest(object):
def __init__(self, linear_classifier_fn):
self._linear_classifier_fn = linear_classifier_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_evaluation_for_simple_data(self, n_classes):
label = 1
age = 1.
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[-11.0]] if n_classes == 2 else (
np.reshape(-11.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-30.0] if n_classes == 2 else [-30.0] * n_classes
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=1)
if n_classes == 2:
      # Binary classes: logits = -11. * 1. - 30. = -41. and label = 1, so
      # loss = 1 * -log ( sigmoid(-41) ) = 41.
expected_metrics = {
metric_keys.MetricKeys.LOSS: 41.,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: 41.,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.,
metric_keys.MetricKeys.LABEL_MEAN: 1.,
metric_keys.MetricKeys.ACCURACY_BASELINE: 1,
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 1.,
}
else:
# Multi classes: loss = 1 * -log ( soft_max(logits)[label] )
logits = age_weight * age + bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[0, label])
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=2)
def test_multi_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=4)
def _test_evaluation_batch(self, n_classes):
"""Tests evaluation for batch_size==2."""
label = [1, 0]
age = [17., 18.]
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (
np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': (age)}, (label)), steps=1)
if n_classes == 2:
# Logits are (-1., 1.) labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133
expected_loss = 1.3133 * 2
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.5,
metric_keys.MetricKeys.LABEL_MEAN: 0.5,
metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 0.25,
}
else:
# Multi classes: loss = 1 * -log ( soft_max(logits)[label] )
logits = age_weight * np.reshape(age, (2, 1)) + bias
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=2)
def test_multi_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=4)
def _test_evaluation_weights(self, n_classes):
"""Tests evaluation with weights."""
label = [1, 0]
age = [17., 18.]
weights = [1., 2.]
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (
np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
weight_column='w',
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': (age), 'w': (weights)}, (label)), steps=1)
if n_classes == 2:
# Logits are (-1., 1.) labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133
# weights = [1., 2.]
expected_loss = 1.3133 * (1. + 2.)
loss_mean = expected_loss / (1.0 + 2.0)
label_mean = np.average(label, weights=weights)
logits = [-1, 1]
logistics = sigmoid(np.array(logits))
predictions_mean = np.average(logistics, weights=weights)
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: predictions_mean,
metric_keys.MetricKeys.LABEL_MEAN: label_mean,
metric_keys.MetricKeys.ACCURACY_BASELINE: (
max(label_mean, 1-label_mean)),
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 0.1668,
}
else:
# Multi classes: unweighted_loss = 1 * -log ( soft_max(logits)[label] )
logits = age_weight * np.reshape(age, (2, 1)) + bias
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
loss_mean = np.average([expected_loss_0, expected_loss_1],
weights=weights)
expected_loss = loss_mean * np.sum(weights)
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=2)
def test_multi_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=4)
class BaseLinearClassifierPredictTest(object):
def __init__(self, linear_classifier_fn):
self._linear_classifier_fn = linear_classifier_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _testPredications(self, n_classes, label_vocabulary, label_output_fn):
"""Tests predict when all variables are one-dimensional."""
age = 1.
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[-11.0]] if n_classes == 2 else (
np.reshape(-11.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [10.0] if n_classes == 2 else [10.0] * n_classes
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
label_vocabulary=label_vocabulary,
n_classes=n_classes,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'age': np.array([[age]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = list(est.predict(input_fn=predict_input_fn))
if n_classes == 2:
scalar_logits = np.asscalar(
np.reshape(np.array(age_weight) * age + bias, (1,)))
two_classes_logits = [0, scalar_logits]
two_classes_logits_exp = np.exp(two_classes_logits)
softmax = two_classes_logits_exp / two_classes_logits_exp.sum()
expected_predictions = {
'class_ids': [0],
'classes': [label_output_fn(0)],
'logistic': [sigmoid(np.array(scalar_logits))],
'logits': [scalar_logits],
'probabilities': softmax,
}
else:
onedim_logits = np.reshape(np.array(age_weight) * age + bias, (-1,))
class_ids = onedim_logits.argmax()
logits_exp = np.exp(onedim_logits)
softmax = logits_exp / logits_exp.sum()
expected_predictions = {
'class_ids': [class_ids],
'classes': [label_output_fn(class_ids)],
'logits': onedim_logits,
'probabilities': softmax,
}
self.assertEqual(1, len(predictions))
# assertAllClose cannot handle byte type.
self.assertEqual(expected_predictions['classes'], predictions[0]['classes'])
expected_predictions.pop('classes')
predictions[0].pop('classes')
self.assertAllClose(sorted_key_dict(expected_predictions),
sorted_key_dict(predictions[0]))
def testBinaryClassesWithoutLabelVocabulary(self):
n_classes = 2
self._testPredications(n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testBinaryClassesWithLabelVocabulary(self):
n_classes = 2
self._testPredications(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i)
for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
def testMultiClassesWithoutLabelVocabulary(self):
n_classes = 4
self._testPredications(
n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testMultiClassesWithLabelVocabulary(self):
n_classes = 4
self._testPredications(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i)
for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
class BaseLinearClassifierIntegrationTest(object):
def __init__(self, linear_classifier_fn):
self._linear_classifier_fn = linear_classifier_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn,
predict_input_fn, input_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = self._linear_classifier_fn(
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['classes'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, 1), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def _test_numpy_input_fn(self, n_classes):
"""Tests complete flow with numpy_input_fn."""
input_dimension = 4
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=2)
def test_multi_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=4)
def _test_pandas_input_fn(self, n_classes):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
    # Pandas DataFrame naturally supports 1-dim data only.
input_dimension = 1
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
target = np.array([1, 0, 1, 0], dtype=np.int32)
x = pd.DataFrame({'x': data})
y = pd.Series(target)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=2)
def test_multi_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=4)
def _test_input_fn_from_parse_example(self, n_classes):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size, dtype=np.int64)
serialized_examples = []
for x, y in zip(data, target):
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=x)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[y])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=2)
def test_multi_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=4)
| apache-2.0 |
scottlittle/solar-sensors | webapp/solarApp/data_helper_functions_webapp.py | 2 | 8989 | import netCDF4
from netCDF4 import Dataset
from mpl_toolkits.basemap import Basemap #map stuff
import numpy as np
import matplotlib.pyplot as plt
import os
import glob
from datetime import datetime, timedelta
import pandas as pd
###########
# Below are helper functions for the satellite
###########
def plot_satellite_image(filename):
    '''Takes a satellite netCDF filename and plots the data on a map.'''
rootgrp = Dataset(filename, "a", format="NETCDF4")
lons = rootgrp.variables['lon'][:]
lats = rootgrp.variables['lat'][:]
data = rootgrp.variables['data'][:]
rootgrp.close() #need to close before you can open again
# Get some parameters for the Stereographic Projection
m = Basemap(width=800000,height=800000, #create a basemap object with these parameters
resolution='l',projection='stere',\
lat_ts=40,lat_0=39.5,lon_0=-104.5)
xi, yi = m(lons, lats) #map onton x and y for plotting
plt.figure(figsize=(10,10)) # Plot Data
cs = m.pcolor(xi,yi,np.squeeze(data)) #data is 1 x 14 x 36, squeeze makes it 14 x 36
m.drawparallels(np.arange(-80., 81., 1.), labels=[1,0,0,0], fontsize=10) # Add Grid Lines
m.drawmeridians(np.arange(-180., 181., 1.), labels=[0,0,0,1], fontsize=10) # Add Grid Lines
m.drawstates(linewidth=3) # Add state boundaries
cbar = m.colorbar(cs, location='bottom', pad="10%") # Add Colorbar
plt.title('GOES 15 - Sensor 1') # Add Title
plt.show()
def return_satellite_data(filename, filefolder):
''' Input: filename
Output: lons, lats, data'''
rootgrp = Dataset(filefolder + filename, "a", format="NETCDF4") #generic name for data
lons = rootgrp.variables['lon'][:] #extract lons ...
lats = rootgrp.variables['lat'][:] #...lats...
data = rootgrp.variables['data'][:] #...and data from netCDF file
rootgrp.close() #need to close before you can open again
return (lons, lats, np.squeeze(data))
def find_file_details(filefolder, filetype = 'nc'): #match 2 or 3 letters
'''Takes in a filefolder with satellite data and returns list of files,
list of file details, list of dates, and list of channels
Example usage: filefolder = "data/satellite/colorado/summer6months/data"
list_of_files, list_of_files_details, \
list_of_dates, list_of_channels = find_file_details(filefolder)'''
data_files = os.listdir(filefolder) #list files in directory
list_of_files = [] #contains all the files
list_of_files_details = [] #contains information collected from filename
for myfile in data_files:
if (myfile[-2:] == filetype or myfile[-3:] == filetype): #only get netCDF by default
list_of_files.append(myfile)
list_of_files_details.append(myfile.split('.'))
list_of_dates = [] #contains datetime data for files
list_of_channels = [] #contains channel data
for i,_ in enumerate(list_of_files_details):
mytime = list_of_files_details[i][1]+" "+list_of_files_details[i][2]+" "+list_of_files_details[i][3]
mydatetime = datetime.strptime(mytime, '%Y %j %H%M%S')
list_of_dates.append(mydatetime)
list_of_channels.append(list_of_files_details[i][4])
return (list_of_files, list_of_files_details, list_of_dates, list_of_channels)
def find_closest_date(desired_datetime, filefolder):
'''Input: desired datetime, Output: datetime of closest file(s)
Example usage: desired_datetime = datetime(2014, 5, 5, 19)
closest_datetime = find_closest_date(desired_datetime)'''
list_of_files, list_of_files_details, \
list_of_dates, list_of_channels = find_file_details(filefolder)
time_differences = []
for i,time in enumerate(list_of_dates):
time_difference = list_of_dates[i] - desired_datetime
time_differences.append(abs(time_difference).total_seconds())
if min(time_differences) < 10800: #return datetime only if < 3 hours
return list_of_dates[np.argmin(time_differences)]
else:
print "No file with this datetime within 3 hours!"+" Desired datetime = "+ str(desired_datetime)
def find_filename(desired_datetime, desired_channel, filefolder):
'''return filename with desired features
Example usage: desired_channel = 'BAND_01'
desired_datetime = datetime(2014, 4, 2, 12)
print find_filename(desired_datetime, desired_channel)'''
list_of_files, list_of_files_details, \
list_of_dates, list_of_channels = find_file_details(filefolder)
closest_datetime = find_closest_date(desired_datetime, filefolder)
list_of_channel_matches = []
for i,channel in enumerate(list_of_channels):
if channel == desired_channel:
list_of_channel_matches.append(i)
list_of_date_matches = []
for i,date in enumerate(list_of_dates):
if date == closest_datetime:
list_of_date_matches.append(i)
#returns interested file (should just be one)
return list_of_files[list(set(list_of_channel_matches) & set(list_of_date_matches))[0]]
##################################################################
# Above are functions for satellite data. Below are functions for sensor
# and PV Output data
##################################################################
# This works for both sensor and pvoutput data:
def find_file_from_date(desired_date, filefolder, filetype = 'csv'):
'''Input: datetime, folder, filetype; Output: file
Usage: filefolder = "data/pvoutput/pvoutput6months/"
desired_date = datetime(2014, 5, 5) #year, month, day [, hour, minute, second]
find_file_from_date(desired_date, filefolder)'''
data_files = os.listdir(filefolder)
list_of_files = [] #contains all the files
list_of_files_details = [] #contains information collected from filename
for myfile in data_files:
if (myfile[-2:] == filetype or myfile[-3:] == filetype): #only get netCDF by default
list_of_files.append(myfile)
list_of_files_details.append(myfile.split('.'))
for i,val in enumerate(list_of_files_details):
try:
if datetime.strptime(list_of_files_details[i][0], '%Y%m%d') == desired_date:
return list_of_files[i]
except:
pass
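# Formats an HHMM integer from the sensor files as 'HH:MM', e.g. 930 -> '09:30'.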
def pad(x):
time = x.astype(str).zfill(4)
return time[:2] + ':' + time[2:]
def return_sensor_data(filename, filefolder):
'''Input: desired file and folder
Output: sensor data pandas dataframe
Usage: filefolder = 'data/sensor_data/colorado6months/'
filename = '20140401.csv'
return_sensor_data(myfile, filefolder)'''
df_header = pd.read_csv(filefolder + 'header.csv')
headers = df_header.columns
df_sensor = pd.read_csv(filefolder + filename,header=None) #sensors
df_sensor.columns = headers
df_sensor['MST'] = df_sensor['MST'].map(pad)
#make a datetime index:
df_sensor['datetime'] = pd.to_datetime(df_sensor['Year'].astype(str)+
df_sensor['DOY'].astype(str)+
df_sensor['MST'].astype(str),
format='%Y%j%H:%M')
#convert to UTC time, website shows that they disregard daylight savings time
df_sensor['datetime'] = df_sensor['datetime'] + pd.Timedelta(hours=7)
df_sensor.set_index(['datetime'],inplace=True) #set created column as index ...
df_sensor = df_sensor.resample('H')# ... so that we can resample it (hourly)
return df_sensor
def return_pvoutput_data(filename, filefolder):
'''Input: Desired file and folder
Output: PV Output dataframe
Usage: filefolder = 'data/pvoutput/pvoutput6months/'
filename = '20140401.csv'
return_pvoutput_data(filename, filefolder)'''
df_output = pd.read_csv(filefolder + filename) #pvoutput
#df_output['datetime'] = df_output['datetime'] + pd.Timedelta(hours=6) #convert to utc time
df_output['datetime'] = df_output['datetime'].apply(pd.to_datetime)
df_output['datetime'] = df_output['datetime'] + pd.Timedelta(hours=6) #convert to utc time
df_output.set_index(['datetime'],inplace=True) #set created column as index ...
df_output = df_output.resample('H') # ...so that we can resample it (hourly)
return df_output[['Power']]
################### Below are functions for querying the data to build models ##########
def make_time(mytime, sunlight = 15, days = 182):
    '''Input: mytime (start datetime) [, sunlight (hours), days]
Output: a list of daylight times'''
times = []
for j in range(0,days): #days
for i in range(0,sunlight): #sunlight time
times.append(mytime)
mytime += timedelta(hours = 1)
mytime += timedelta(hours = (24-sunlight)) #night time, skip this
return times
def datetime_from_date(desired_datetime):
'''Input: datetime, Output: date in datetime format
Info: Subtracts 6 hours from datetime to make sure in correct folder'''
from datetime import datetime, timedelta, time
desired_date = (desired_datetime - timedelta(hours=6)).date() #make sure correct date
desired_date = datetime.combine(desired_date, time.min) #get into datetime format
return desired_date
| apache-2.0 |
tawsifkhan/trafficjamto | baselines/readraw.py | 1 | 3241 | #!/usr/bin/env python
from __future__ import print_function
import pandas as pd
import numpy as np
import pyproj
import time
import argparse
import os
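# Converts HHMMSS timestamps (e.g. 134501 for 13:45:01) into seconds since
# midnight: 13 * 3600 + 45 * 60 + 1 = 49501.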
def timeFromHMS(hms):
hmsint = hms.astype(np.int)
seci = np.mod(hmsint,100)
mini = np.mod(hmsint/100,100)
hri = np.mod(hmsint/10000,100)
return (hri*60+mini)*60+seci
def dataFromFile(filename, maxspeed):
data = pd.read_csv(filename).dropna()
if not 'time' in data.columns:
data.columns = ['hmsID','id','route','dirTag','lat','lon','secsSinceReport','whocares','heading']
data['time'] = timeFromHMS(data['hmsID'].values)
data = data.drop('whocares',1)
data['time'] = data['time'] - data['secsSinceReport']
data = data.drop('secsSinceReport',1).drop('dirTag',1)
data = data.sort(['id','time'])
start = time.time()
data['x'], data['y'] = proj(data['lon'].values, data['lat'].values)
allids = data['id'].unique()
# start = time.time()
# for vid in allids:
# vehicledata = data['id']==vid
# dx = data.loc[vehicledata, 'x'].diff().values
# dy = data.loc[vehicledata, 'y'].diff().values
# dt = data.loc[vehicledata, 'time'].diff().values
# dist = np.sqrt(dx*dx+dy*dy)
# data.loc[vehicledata, 'speed'] = dist/(dt+0.1)
# print(time.time()-start)
# start = time.time()
#
grouped = data.groupby('id')
dx = grouped['x'].diff().values
dy = grouped['y'].diff().values
dt = grouped['time'].diff().values
dist = np.sqrt(dx*dx+dy*dy)
data['speed'] = dist/dt
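    # x/y are UTM coordinates in metres and dt is in seconds, so speed is in
    # m/s; the default --maxspeed of 15 m/s is roughly 54 km/h.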
    data = data.dropna()
data = data.loc[(data.speed > 0.5) & (data.speed < maxspeed)]
return data
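# Snaps each observation onto a spatialRes-metre grid, a headingRes-degree
# heading sector and a timeRes-minute time-of-day bin so that speeds can be
# aggregated per (location, direction, time) cell by groupData below.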
def binData(data, spatialRes=200, headingRes=45, timeRes=30):
timeRes = timeRes*60.
data['binx'] = np.round(data['x']/spatialRes)*spatialRes
data['biny'] = np.round(data['y']/spatialRes)*spatialRes
    # use the binned coordinates so that groupData aggregates whole grid cells
    data['binlon'], data['binlat'] = proj(data['binx'].values, data['biny'].values, inverse=True)
data['bintime'] = (np.round(1.0*data['time'].values/timeRes).astype(np.int)*timeRes).astype(np.int)
data['binheading'] = np.round(1.0*data['heading'].values/headingRes).astype(np.int)*headingRes
return data
def groupData(data, minpts=5):
grouped = data.groupby(['binlat','binlon','bintime','binheading'])
grouped = pd.DataFrame(grouped['speed'].agg([len, np.mean, np.std]))
    grouped = grouped.loc[grouped.len > minpts]
return grouped
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('infile', nargs="+", help="Raw input csv files")
parser.add_argument('-v','--maxspeed', type=float, default=15.)
parser.add_argument('-o','--output', type=str, default="grouped.csv", help="Output file name")
args = parser.parse_args()
proj = pyproj.Proj("+proj=utm +zone=17N, +ellps=WGS84 +datum=WGS84 +units=m +no_defs")
allbinned = []
for filename in args.infile:
basename = os.path.splitext(filename)[0]
data = dataFromFile(filename, args.maxspeed)
data.to_csv(basename+"-speed.csv")
bindata = binData(data)
data.to_csv(basename+"-binned.csv")
allbinned.append(bindata)
grouped = groupData( pd.concat( allbinned ) )
grouped.to_csv(args.output)
| gpl-2.0 |
donK23/shed01 | PyBeispiele/SherlockRec/load_ratings.py | 1 | 2235 | """
Name: load_ratings
Purpose: Load ratings and books from DB
Author: Thomas Treml ([email protected])
Date: 2015-08-31
"""
import numpy as np
import pandas as pd
from sqlalchemy import create_engine, sql
import cPickle
""" DB presets """
from instance.config import PG_USER, PG_PASSWORD
db_uri = "postgresql://" + PG_USER + ":" + PG_PASSWORD + "@localhost/shrecdb"
engine = create_engine(db_uri)
con = engine.connect()
""" Load ratings """
# DB query
query_ratings = "SELECT * FROM ratings"
all_ratings = con.execute(sql.text(query_ratings))
# Data processing
user_ids = []; ts = []; br01 = []; br02 = []; br03 = []; br04 = []; br05 = []; br06 = []; br07 = []; br08 = [];
br09 = []; br10 = []; br11 = []; br12 = []; br13 = []; br14 = []; br15 = []
for rating in all_ratings:
user_ids.append(rating[0])
ts.append(rating[1])
br01.append(rating[2]); br02.append(rating[3]); br03.append(rating[4]); br04.append(rating[5]);
br05.append(rating[6]); br06.append(rating[7]); br07.append(rating[8]);br08.append(rating[9]);
br09.append(rating[10]); br10.append(rating[11]); br11.append(rating[12]); br12.append(rating[13]);
br13.append(rating[14]); br14.append(rating[15]); br15.append(rating[16])
# Dataframe and save to pickle file
ratings = {"USER_ID": user_ids, "RATED_AT": ts, "RAT_B01": br01, "RAT_B02": br02, "RAT_B03": br03, "RAT_B04": br04,
"RAT_B05": br05, "RAT_B06": br06, "RAT_B07": br07, "RAT_B08": br08, "RAT_B09": br09, "RAT_B10": br10,
"RAT_B11": br11, "RAT_B12": br12, "RAT_B13": br13, "RAT_B14": br14, "RAT_B15": br15}
ratings_df = pd.DataFrame(ratings)
cPickle.dump(ratings_df, open("./data/ratings_df.p", "wb"))
print "Ratings records: " + str(len(ratings_df))
print ratings_df.head()
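# Note: pandas could also build this frame in a single call (column names
# would then follow the DB schema rather than the names used above), e.g.:
# ratings_df = pd.read_sql("SELECT * FROM ratings", engine)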
""" Load books """
# DB query
query_books = "SELECT * FROM books"
books = con.execute(sql.text(query_books))
con.close()
# Data processing
book_ids = []; titles = []
for book in books:
book_ids.append(book[0])
titles.append(book[1])
# Dataframe and save to pickle file
books = {"BOOK_ID": book_ids, "TITLE": titles}
books_df = pd.DataFrame(books)
cPickle.dump(books_df, open("./data/books_df.p", "wb"))
print "Book records: " + str(len(books_df))
print books_df.head()
| apache-2.0 |
nborggren/zipline | tests/test_assets.py | 1 | 52689 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the zipline.assets package
"""
from contextlib import contextmanager
from datetime import datetime, timedelta
import pickle
import sys
from unittest import TestCase
import uuid
import warnings
import pandas as pd
from pandas.tseries.tools import normalize_date
from pandas.util.testing import assert_frame_equal
from nose_parameterized import parameterized
from numpy import full, int32, int64
import sqlalchemy as sa
from zipline.assets import (
Asset,
Equity,
Future,
AssetFinder,
AssetFinderCachedEquities,
)
from six import itervalues, integer_types
from toolz import valmap
from zipline.assets.futures import (
cme_code_to_month,
FutureChain,
month_to_cme_code
)
from zipline.assets.asset_writer import (
check_version_info,
write_version_info,
_futures_defaults,
)
from zipline.assets.asset_db_schema import (
ASSET_DB_VERSION,
_version_table_schema,
)
from zipline.assets.asset_db_migrations import (
downgrade
)
from zipline.errors import (
EquitiesNotFound,
FutureContractsNotFound,
MultipleSymbolsFound,
RootSymbolNotFound,
AssetDBVersionError,
SidAssignmentError,
SidsNotFound,
SymbolNotFound,
AssetDBImpossibleDowngrade,
)
from zipline.finance.trading import TradingEnvironment, noop_load
from zipline.utils.test_utils import (
all_subindices,
make_commodity_future_info,
make_rotating_equity_info,
make_simple_equity_info,
tmp_assets_db,
tmp_asset_finder,
)
@contextmanager
def build_lookup_generic_cases(asset_finder_type):
"""
Generate test cases for the type of asset finder specific by
asset_finder_type for test_lookup_generic.
"""
unique_start = pd.Timestamp('2013-01-01', tz='UTC')
unique_end = pd.Timestamp('2014-01-01', tz='UTC')
dupe_0_start = pd.Timestamp('2013-01-01', tz='UTC')
dupe_0_end = dupe_0_start + timedelta(days=1)
dupe_1_start = pd.Timestamp('2013-01-03', tz='UTC')
dupe_1_end = dupe_1_start + timedelta(days=1)
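    # Sids 0 and 1 below share the symbol 'duplicated' over disjoint date
    # ranges, so lookups for it must be disambiguated by the as-of date;
    # sid 2 ('unique') resolves with or without a date.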
frame = pd.DataFrame.from_records(
[
{
'sid': 0,
'symbol': 'duplicated',
'start_date': dupe_0_start.value,
'end_date': dupe_0_end.value,
'exchange': '',
},
{
'sid': 1,
'symbol': 'duplicated',
'start_date': dupe_1_start.value,
'end_date': dupe_1_end.value,
'exchange': '',
},
{
'sid': 2,
'symbol': 'unique',
'start_date': unique_start.value,
'end_date': unique_end.value,
'exchange': '',
},
],
index='sid')
with tmp_assets_db(equities=frame) as assets_db:
finder = asset_finder_type(assets_db)
dupe_0, dupe_1, unique = assets = [
finder.retrieve_asset(i)
for i in range(3)
]
dupe_0_start = dupe_0.start_date
dupe_1_start = dupe_1.start_date
yield (
##
# Scalars
# Asset object
(finder, assets[0], None, assets[0]),
(finder, assets[1], None, assets[1]),
(finder, assets[2], None, assets[2]),
# int
(finder, 0, None, assets[0]),
(finder, 1, None, assets[1]),
(finder, 2, None, assets[2]),
# Duplicated symbol with resolution date
(finder, 'DUPLICATED', dupe_0_start, dupe_0),
(finder, 'DUPLICATED', dupe_1_start, dupe_1),
# Unique symbol, with or without resolution date.
(finder, 'UNIQUE', unique_start, unique),
(finder, 'UNIQUE', None, unique),
##
# Iterables
# Iterables of Asset objects.
(finder, assets, None, assets),
(finder, iter(assets), None, assets),
# Iterables of ints
(finder, (0, 1), None, assets[:-1]),
(finder, iter((0, 1)), None, assets[:-1]),
# Iterables of symbols.
(finder, ('DUPLICATED', 'UNIQUE'), dupe_0_start, [dupe_0, unique]),
(finder, ('DUPLICATED', 'UNIQUE'), dupe_1_start, [dupe_1, unique]),
# Mixed types
(finder,
('DUPLICATED', 2, 'UNIQUE', 1, dupe_1),
dupe_0_start,
[dupe_0, assets[2], unique, assets[1], dupe_1]),
)
class AssetTestCase(TestCase):
def test_asset_object(self):
self.assertEquals({5061: 'foo'}[Asset(5061)], 'foo')
self.assertEquals(Asset(5061), 5061)
self.assertEquals(5061, Asset(5061))
self.assertEquals(Asset(5061), Asset(5061))
self.assertEquals(int(Asset(5061)), 5061)
self.assertEquals(str(Asset(5061)), 'Asset(5061)')
def test_asset_is_pickleable(self):
# Very wow
s = Asset(
1337,
symbol="DOGE",
asset_name="DOGECOIN",
start_date=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
end_date=pd.Timestamp('2014-06-25 11:21AM', tz='UTC'),
first_traded=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
exchange='THE MOON',
)
s_unpickled = pickle.loads(pickle.dumps(s))
attrs_to_check = ['end_date',
'exchange',
'first_traded',
'end_date',
'asset_name',
'start_date',
'sid',
'start_date',
'symbol']
for attr in attrs_to_check:
self.assertEqual(getattr(s, attr), getattr(s_unpickled, attr))
def test_asset_comparisons(self):
s_23 = Asset(23)
s_24 = Asset(24)
self.assertEqual(s_23, s_23)
self.assertEqual(s_23, 23)
self.assertEqual(23, s_23)
self.assertEqual(int32(23), s_23)
self.assertEqual(int64(23), s_23)
self.assertEqual(s_23, int32(23))
self.assertEqual(s_23, int64(23))
# Check all int types (includes long on py2):
for int_type in integer_types:
self.assertEqual(int_type(23), s_23)
self.assertEqual(s_23, int_type(23))
self.assertNotEqual(s_23, s_24)
self.assertNotEqual(s_23, 24)
self.assertNotEqual(s_23, "23")
self.assertNotEqual(s_23, 23.5)
self.assertNotEqual(s_23, [])
self.assertNotEqual(s_23, None)
# Compare to a value that doesn't fit into a platform int:
self.assertNotEqual(s_23, sys.maxsize + 1)
self.assertLess(s_23, s_24)
self.assertLess(s_23, 24)
self.assertGreater(24, s_23)
self.assertGreater(s_24, s_23)
def test_lt(self):
self.assertTrue(Asset(3) < Asset(4))
self.assertFalse(Asset(4) < Asset(4))
self.assertFalse(Asset(5) < Asset(4))
def test_le(self):
self.assertTrue(Asset(3) <= Asset(4))
self.assertTrue(Asset(4) <= Asset(4))
self.assertFalse(Asset(5) <= Asset(4))
def test_eq(self):
self.assertFalse(Asset(3) == Asset(4))
self.assertTrue(Asset(4) == Asset(4))
self.assertFalse(Asset(5) == Asset(4))
def test_ge(self):
self.assertFalse(Asset(3) >= Asset(4))
self.assertTrue(Asset(4) >= Asset(4))
self.assertTrue(Asset(5) >= Asset(4))
def test_gt(self):
self.assertFalse(Asset(3) > Asset(4))
self.assertFalse(Asset(4) > Asset(4))
self.assertTrue(Asset(5) > Asset(4))
def test_type_mismatch(self):
if sys.version_info.major < 3:
self.assertIsNotNone(Asset(3) < 'a')
self.assertIsNotNone('a' < Asset(3))
else:
with self.assertRaises(TypeError):
Asset(3) < 'a'
with self.assertRaises(TypeError):
'a' < Asset(3)
class TestFuture(TestCase):
@classmethod
def setUpClass(cls):
cls.future = Future(
2468,
symbol='OMH15',
root_symbol='OM',
notice_date=pd.Timestamp('2014-01-20', tz='UTC'),
expiration_date=pd.Timestamp('2014-02-20', tz='UTC'),
auto_close_date=pd.Timestamp('2014-01-18', tz='UTC'),
tick_size=.01,
multiplier=500
)
cls.future2 = Future(
0,
symbol='CLG06',
root_symbol='CL',
start_date=pd.Timestamp('2005-12-01', tz='UTC'),
notice_date=pd.Timestamp('2005-12-20', tz='UTC'),
expiration_date=pd.Timestamp('2006-01-20', tz='UTC')
)
env = TradingEnvironment(load=noop_load)
env.write_data(futures_identifiers=[TestFuture.future,
TestFuture.future2])
cls.asset_finder = env.asset_finder
def test_str(self):
strd = self.future.__str__()
self.assertEqual("Future(2468 [OMH15])", strd)
def test_repr(self):
reprd = self.future.__repr__()
self.assertTrue("Future" in reprd)
self.assertTrue("2468" in reprd)
self.assertTrue("OMH15" in reprd)
self.assertTrue("root_symbol='OM'" in reprd)
self.assertTrue(("notice_date=Timestamp('2014-01-20 00:00:00+0000', "
"tz='UTC')") in reprd)
self.assertTrue("expiration_date=Timestamp('2014-02-20 00:00:00+0000'"
in reprd)
self.assertTrue("auto_close_date=Timestamp('2014-01-18 00:00:00+0000'"
in reprd)
self.assertTrue("tick_size=0.01" in reprd)
self.assertTrue("multiplier=500" in reprd)
def test_reduce(self):
reduced = self.future.__reduce__()
self.assertEqual(Future, reduced[0])
def test_to_and_from_dict(self):
dictd = self.future.to_dict()
for field in _futures_defaults.keys():
self.assertTrue(field in dictd)
from_dict = Future.from_dict(dictd)
self.assertTrue(isinstance(from_dict, Future))
self.assertEqual(self.future, from_dict)
def test_root_symbol(self):
self.assertEqual('OM', self.future.root_symbol)
def test_lookup_future_symbol(self):
"""
Test the lookup_future_symbol method.
"""
om = TestFuture.asset_finder.lookup_future_symbol('OMH15')
self.assertEqual(om.sid, 2468)
self.assertEqual(om.symbol, 'OMH15')
self.assertEqual(om.root_symbol, 'OM')
self.assertEqual(om.notice_date, pd.Timestamp('2014-01-20', tz='UTC'))
self.assertEqual(om.expiration_date,
pd.Timestamp('2014-02-20', tz='UTC'))
self.assertEqual(om.auto_close_date,
pd.Timestamp('2014-01-18', tz='UTC'))
cl = TestFuture.asset_finder.lookup_future_symbol('CLG06')
self.assertEqual(cl.sid, 0)
self.assertEqual(cl.symbol, 'CLG06')
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.start_date, pd.Timestamp('2005-12-01', tz='UTC'))
self.assertEqual(cl.notice_date, pd.Timestamp('2005-12-20', tz='UTC'))
self.assertEqual(cl.expiration_date,
pd.Timestamp('2006-01-20', tz='UTC'))
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('#&?!')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('FOOBAR')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('XXX99')
class AssetFinderTestCase(TestCase):
def setUp(self):
self.env = TradingEnvironment(load=noop_load)
self.asset_finder_type = AssetFinder
def test_lookup_symbol_delimited(self):
as_of = pd.Timestamp('2013-01-01', tz='UTC')
frame = pd.DataFrame.from_records(
[
{
'sid': i,
'symbol': 'TEST.%d' % i,
'company_name': "company%d" % i,
'start_date': as_of.value,
'end_date': as_of.value,
'exchange': uuid.uuid4().hex
}
for i in range(3)
]
)
self.env.write_data(equities_df=frame)
finder = self.asset_finder_type(self.env.engine)
asset_0, asset_1, asset_2 = (
finder.retrieve_asset(i) for i in range(3)
)
# we do it twice to catch caching bugs
for i in range(2):
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('TEST', as_of)
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('TEST1', as_of)
# '@' is not a supported delimiter
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('TEST@1', as_of)
# Adding an unnecessary fuzzy shouldn't matter.
for fuzzy_char in ['-', '/', '_', '.']:
self.assertEqual(
asset_1,
finder.lookup_symbol('TEST%s1' % fuzzy_char, as_of)
)
def test_lookup_symbol_fuzzy(self):
metadata = {
0: {'symbol': 'PRTY_HRD'},
1: {'symbol': 'BRKA'},
2: {'symbol': 'BRK_A'},
}
self.env.write_data(equities_data=metadata)
finder = self.env.asset_finder
dt = pd.Timestamp('2013-01-01', tz='UTC')
# Try combos of looking up PRTYHRD with and without a time or fuzzy
# Both non-fuzzys get no result
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('PRTYHRD', None)
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('PRTYHRD', dt)
# Both fuzzys work
self.assertEqual(0, finder.lookup_symbol('PRTYHRD', None, fuzzy=True))
self.assertEqual(0, finder.lookup_symbol('PRTYHRD', dt, fuzzy=True))
# Try combos of looking up PRTY_HRD, all returning sid 0
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', None))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', dt))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', None, fuzzy=True))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', dt, fuzzy=True))
# Try combos of looking up BRKA, all returning sid 1
self.assertEqual(1, finder.lookup_symbol('BRKA', None))
self.assertEqual(1, finder.lookup_symbol('BRKA', dt))
self.assertEqual(1, finder.lookup_symbol('BRKA', None, fuzzy=True))
self.assertEqual(1, finder.lookup_symbol('BRKA', dt, fuzzy=True))
# Try combos of looking up BRK_A, all returning sid 2
self.assertEqual(2, finder.lookup_symbol('BRK_A', None))
self.assertEqual(2, finder.lookup_symbol('BRK_A', dt))
self.assertEqual(2, finder.lookup_symbol('BRK_A', None, fuzzy=True))
self.assertEqual(2, finder.lookup_symbol('BRK_A', dt, fuzzy=True))
def test_lookup_symbol(self):
# Incrementing by two so that start and end dates for each
# generated Asset don't overlap (each Asset's end_date is the
# day after its start date.)
dates = pd.date_range('2013-01-01', freq='2D', periods=5, tz='UTC')
df = pd.DataFrame.from_records(
[
{
'sid': i,
'symbol': 'existing',
'start_date': date.value,
'end_date': (date + timedelta(days=1)).value,
'exchange': 'NYSE',
}
for i, date in enumerate(dates)
]
)
self.env.write_data(equities_df=df)
finder = self.asset_finder_type(self.env.engine)
for _ in range(2): # Run checks twice to test for caching bugs.
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('NON_EXISTING', dates[0])
with self.assertRaises(MultipleSymbolsFound):
finder.lookup_symbol('EXISTING', None)
for i, date in enumerate(dates):
# Verify that we correctly resolve multiple symbols using
# the supplied date
result = finder.lookup_symbol('EXISTING', date)
self.assertEqual(result.symbol, 'EXISTING')
self.assertEqual(result.sid, i)
def test_lookup_symbol_from_multiple_valid(self):
# This test asserts that we resolve conflicts in accordance with the
# following rules when we have multiple assets holding the same symbol
# at the same time:
# If multiple SIDs exist for symbol S at time T, return the candidate
# SID whose start_date is highest. (200 cases)
# If multiple SIDs exist for symbol S at time T, the best candidate
# SIDs share the highest start_date, return the SID with the highest
# end_date. (34 cases)
# It is the opinion of the author (ssanderson) that we should consider
        # this malformed input and fail here. But this is the current intended
# behavior of the code, and I accidentally broke it while refactoring.
# These will serve as regression tests until the time comes that we
# decide to enforce this as an error.
# See https://github.com/quantopian/zipline/issues/837 for more
# details.
df = pd.DataFrame.from_records(
[
{
'sid': 1,
'symbol': 'multiple',
'start_date': pd.Timestamp('2010-01-01'),
'end_date': pd.Timestamp('2012-01-01'),
'exchange': 'NYSE'
},
# Same as asset 1, but with a later end date.
{
'sid': 2,
'symbol': 'multiple',
'start_date': pd.Timestamp('2010-01-01'),
'end_date': pd.Timestamp('2013-01-01'),
'exchange': 'NYSE'
},
# Same as asset 1, but with a later start_date
{
'sid': 3,
'symbol': 'multiple',
'start_date': pd.Timestamp('2011-01-01'),
'end_date': pd.Timestamp('2012-01-01'),
'exchange': 'NYSE'
},
]
)
def check(expected_sid, date):
result = finder.lookup_symbol(
'MULTIPLE', date,
)
self.assertEqual(result.symbol, 'MULTIPLE')
self.assertEqual(result.sid, expected_sid)
with tmp_asset_finder(finder_cls=self.asset_finder_type,
equities=df) as finder:
self.assertIsInstance(finder, self.asset_finder_type)
# Sids 1 and 2 are eligible here. We should get asset 2 because it
# has the later end_date.
check(2, pd.Timestamp('2010-12-31'))
# Sids 1, 2, and 3 are eligible here. We should get sid 3 because
# it has a later start_date
check(3, pd.Timestamp('2011-01-01'))
def test_lookup_generic(self):
"""
Ensure that lookup_generic works with various permutations of inputs.
"""
with build_lookup_generic_cases(self.asset_finder_type) as cases:
for finder, symbols, reference_date, expected in cases:
results, missing = finder.lookup_generic(symbols,
reference_date)
self.assertEqual(results, expected)
self.assertEqual(missing, [])
def test_lookup_generic_handle_missing(self):
data = pd.DataFrame.from_records(
[
{
'sid': 0,
'symbol': 'real',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': '',
},
{
'sid': 1,
'symbol': 'also_real',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': '',
},
# Sid whose end date is before our query date. We should
# still correctly find it.
{
'sid': 2,
'symbol': 'real_but_old',
'start_date': pd.Timestamp('2002-1-1', tz='UTC'),
'end_date': pd.Timestamp('2003-1-1', tz='UTC'),
'exchange': '',
},
# Sid whose start_date is **after** our query date. We should
# **not** find it.
{
'sid': 3,
'symbol': 'real_but_in_the_future',
'start_date': pd.Timestamp('2014-1-1', tz='UTC'),
'end_date': pd.Timestamp('2020-1-1', tz='UTC'),
'exchange': 'THE FUTURE',
},
]
)
self.env.write_data(equities_df=data)
finder = self.asset_finder_type(self.env.engine)
results, missing = finder.lookup_generic(
['REAL', 1, 'FAKE', 'REAL_BUT_OLD', 'REAL_BUT_IN_THE_FUTURE'],
pd.Timestamp('2013-02-01', tz='UTC'),
)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].symbol, 'REAL')
self.assertEqual(results[0].sid, 0)
self.assertEqual(results[1].symbol, 'ALSO_REAL')
self.assertEqual(results[1].sid, 1)
self.assertEqual(results[2].symbol, 'REAL_BUT_OLD')
self.assertEqual(results[2].sid, 2)
self.assertEqual(len(missing), 2)
self.assertEqual(missing[0], 'FAKE')
self.assertEqual(missing[1], 'REAL_BUT_IN_THE_FUTURE')
def test_insert_metadata(self):
data = {0: {'start_date': '2014-01-01',
'end_date': '2015-01-01',
'symbol': "PLAY",
'foo_data': "FOO"}}
self.env.write_data(equities_data=data)
finder = self.asset_finder_type(self.env.engine)
# Test proper insertion
equity = finder.retrieve_asset(0)
self.assertIsInstance(equity, Equity)
self.assertEqual('PLAY', equity.symbol)
self.assertEqual(pd.Timestamp('2015-01-01', tz='UTC'),
equity.end_date)
# Test invalid field
with self.assertRaises(AttributeError):
equity.foo_data
def test_consume_metadata(self):
# Test dict consumption
dict_to_consume = {0: {'symbol': 'PLAY'},
1: {'symbol': 'MSFT'}}
self.env.write_data(equities_data=dict_to_consume)
finder = self.asset_finder_type(self.env.engine)
equity = finder.retrieve_asset(0)
self.assertIsInstance(equity, Equity)
self.assertEqual('PLAY', equity.symbol)
# Test dataframe consumption
df = pd.DataFrame(columns=['asset_name', 'exchange'], index=[0, 1])
df['asset_name'][0] = "Dave'N'Busters"
df['exchange'][0] = "NASDAQ"
df['asset_name'][1] = "Microsoft"
df['exchange'][1] = "NYSE"
self.env = TradingEnvironment(load=noop_load)
self.env.write_data(equities_df=df)
finder = self.asset_finder_type(self.env.engine)
self.assertEqual('NASDAQ', finder.retrieve_asset(0).exchange)
self.assertEqual('Microsoft', finder.retrieve_asset(1).asset_name)
def test_consume_asset_as_identifier(self):
# Build some end dates
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
fut_end = pd.Timestamp('2008-01-01', tz='UTC')
# Build some simple Assets
equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end)
future_asset = Future(200, symbol="TESTFUT", end_date=fut_end)
# Consume the Assets
self.env.write_data(equities_identifiers=[equity_asset],
futures_identifiers=[future_asset])
finder = self.asset_finder_type(self.env.engine)
# Test equality with newly built Assets
self.assertEqual(equity_asset, finder.retrieve_asset(1))
self.assertEqual(future_asset, finder.retrieve_asset(200))
self.assertEqual(eq_end, finder.retrieve_asset(1).end_date)
self.assertEqual(fut_end, finder.retrieve_asset(200).end_date)
def test_sid_assignment(self):
# This metadata does not contain SIDs
metadata = ['PLAY', 'MSFT']
today = normalize_date(pd.Timestamp('2015-07-09', tz='UTC'))
# Write data with sid assignment
self.env.write_data(equities_identifiers=metadata,
allow_sid_assignment=True)
# Verify that Assets were built and different sids were assigned
finder = self.asset_finder_type(self.env.engine)
play = finder.lookup_symbol('PLAY', today)
msft = finder.lookup_symbol('MSFT', today)
self.assertEqual('PLAY', play.symbol)
self.assertIsNotNone(play.sid)
self.assertNotEqual(play.sid, msft.sid)
def test_sid_assignment_failure(self):
# This metadata does not contain SIDs
metadata = ['PLAY', 'MSFT']
# Write data without sid assignment, asserting failure
with self.assertRaises(SidAssignmentError):
self.env.write_data(equities_identifiers=metadata,
allow_sid_assignment=False)
def test_security_dates_warning(self):
# Build an asset with an end_date
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end)
# Catch all warnings
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered
warnings.simplefilter("always")
equity_asset.security_start_date
equity_asset.security_end_date
equity_asset.security_name
# Verify the warning
self.assertEqual(3, len(w))
for warning in w:
self.assertTrue(issubclass(warning.category,
DeprecationWarning))
def test_lookup_future_chain(self):
metadata = {
# Notice day is today, so should be valid.
0: {
'symbol': 'ADN15',
'root_symbol': 'AD',
'notice_date': pd.Timestamp('2015-06-14', tz='UTC'),
'expiration_date': pd.Timestamp('2015-08-14', tz='UTC'),
'start_date': pd.Timestamp('2015-01-01', tz='UTC')
},
1: {
'symbol': 'ADV15',
'root_symbol': 'AD',
'notice_date': pd.Timestamp('2015-05-14', tz='UTC'),
'expiration_date': pd.Timestamp('2015-09-14', tz='UTC'),
'start_date': pd.Timestamp('2015-01-01', tz='UTC')
},
# Starts trading today, so should be valid.
2: {
'symbol': 'ADF16',
'root_symbol': 'AD',
'notice_date': pd.Timestamp('2015-11-16', tz='UTC'),
'expiration_date': pd.Timestamp('2015-12-16', tz='UTC'),
'start_date': pd.Timestamp('2015-05-14', tz='UTC')
},
# Starts trading in August, so not valid.
3: {
'symbol': 'ADX16',
'root_symbol': 'AD',
'notice_date': pd.Timestamp('2015-11-16', tz='UTC'),
'expiration_date': pd.Timestamp('2015-12-16', tz='UTC'),
'start_date': pd.Timestamp('2015-08-01', tz='UTC')
},
# Notice date comes after expiration
4: {
'symbol': 'ADZ16',
'root_symbol': 'AD',
'notice_date': pd.Timestamp('2016-11-25', tz='UTC'),
'expiration_date': pd.Timestamp('2016-11-16', tz='UTC'),
'start_date': pd.Timestamp('2015-08-01', tz='UTC')
},
# This contract has no start date and also this contract should be
# last in all chains
5: {
'symbol': 'ADZ20',
'root_symbol': 'AD',
'notice_date': pd.Timestamp('2020-11-25', tz='UTC'),
'expiration_date': pd.Timestamp('2020-11-16', tz='UTC')
},
}
self.env.write_data(futures_data=metadata)
finder = self.asset_finder_type(self.env.engine)
dt = pd.Timestamp('2015-05-14', tz='UTC')
dt_2 = pd.Timestamp('2015-10-14', tz='UTC')
dt_3 = pd.Timestamp('2016-11-17', tz='UTC')
# Check that we get the expected number of contracts, in the
# right order
ad_contracts = finder.lookup_future_chain('AD', dt)
self.assertEqual(len(ad_contracts), 6)
self.assertEqual(ad_contracts[0].sid, 1)
self.assertEqual(ad_contracts[1].sid, 0)
self.assertEqual(ad_contracts[5].sid, 5)
# Check that, when some contracts have expired, the chain has advanced
# properly to the next contracts
ad_contracts = finder.lookup_future_chain('AD', dt_2)
self.assertEqual(len(ad_contracts), 4)
self.assertEqual(ad_contracts[0].sid, 2)
self.assertEqual(ad_contracts[3].sid, 5)
# Check that when the expiration_date has passed but the
# notice_date hasn't, contract is still considered invalid.
ad_contracts = finder.lookup_future_chain('AD', dt_3)
self.assertEqual(len(ad_contracts), 1)
self.assertEqual(ad_contracts[0].sid, 5)
# Check that pd.NaT for as_of_date gives the whole chain
ad_contracts = finder.lookup_future_chain('AD', pd.NaT)
self.assertEqual(len(ad_contracts), 6)
self.assertEqual(ad_contracts[5].sid, 5)
def test_map_identifier_index_to_sids(self):
# Build an empty finder and some Assets
dt = pd.Timestamp('2014-01-01', tz='UTC')
finder = self.asset_finder_type(self.env.engine)
asset1 = Equity(1, symbol="AAPL")
asset2 = Equity(2, symbol="GOOG")
asset200 = Future(200, symbol="CLK15")
asset201 = Future(201, symbol="CLM15")
# Check for correct mapping and types
pre_map = [asset1, asset2, asset200, asset201]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([1, 2, 200, 201], post_map)
for sid in post_map:
self.assertIsInstance(sid, int)
# Change order and check mapping again
pre_map = [asset201, asset2, asset200, asset1]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([201, 2, 200, 1], post_map)
def test_compute_lifetimes(self):
num_assets = 4
trading_day = self.env.trading_day
first_start = pd.Timestamp('2015-04-01', tz='UTC')
frame = make_rotating_equity_info(
num_assets=num_assets,
first_start=first_start,
frequency=self.env.trading_day,
periods_between_starts=3,
asset_lifetime=5
)
self.env.write_data(equities_df=frame)
finder = self.env.asset_finder
all_dates = pd.date_range(
start=first_start,
end=frame.end_date.max(),
freq=trading_day,
)
for dates in all_subindices(all_dates):
expected_with_start_raw = full(
shape=(len(dates), num_assets),
fill_value=False,
dtype=bool,
)
expected_no_start_raw = full(
shape=(len(dates), num_assets),
fill_value=False,
dtype=bool,
)
for i, date in enumerate(dates):
it = frame[['start_date', 'end_date']].itertuples()
for j, start, end in it:
# This way of doing the checks is redundant, but very
# clear.
if start <= date <= end:
expected_with_start_raw[i, j] = True
if start < date:
expected_no_start_raw[i, j] = True
expected_with_start = pd.DataFrame(
data=expected_with_start_raw,
index=dates,
columns=frame.index.values,
)
result = finder.lifetimes(dates, include_start_date=True)
assert_frame_equal(result, expected_with_start)
expected_no_start = pd.DataFrame(
data=expected_no_start_raw,
index=dates,
columns=frame.index.values,
)
result = finder.lifetimes(dates, include_start_date=False)
assert_frame_equal(result, expected_no_start)
def test_sids(self):
# Ensure that the sids property of the AssetFinder is functioning
self.env.write_data(equities_identifiers=[1, 2, 3])
sids = self.env.asset_finder.sids
self.assertEqual(3, len(sids))
self.assertTrue(1 in sids)
self.assertTrue(2 in sids)
self.assertTrue(3 in sids)
def test_group_by_type(self):
equities = make_simple_equity_info(
range(5),
start_date=pd.Timestamp('2014-01-01'),
end_date=pd.Timestamp('2015-01-01'),
)
futures = make_commodity_future_info(
first_sid=6,
root_symbols=['CL'],
years=[2014],
)
# Intersecting sid queries, to exercise loading of partially-cached
# results.
queries = [
([0, 1, 3], [6, 7]),
([0, 2, 3], [7, 10]),
(list(equities.index), list(futures.index)),
]
with tmp_asset_finder(equities=equities, futures=futures) as finder:
for equity_sids, future_sids in queries:
results = finder.group_by_type(equity_sids + future_sids)
self.assertEqual(
results,
{'equity': set(equity_sids), 'future': set(future_sids)},
)
@parameterized.expand([
(Equity, 'retrieve_equities', EquitiesNotFound),
(Future, 'retrieve_futures_contracts', FutureContractsNotFound),
])
def test_retrieve_specific_type(self, type_, lookup_name, failure_type):
equities = make_simple_equity_info(
range(5),
start_date=pd.Timestamp('2014-01-01'),
end_date=pd.Timestamp('2015-01-01'),
)
max_equity = equities.index.max()
futures = make_commodity_future_info(
first_sid=max_equity + 1,
root_symbols=['CL'],
years=[2014],
)
equity_sids = [0, 1]
future_sids = [max_equity + 1, max_equity + 2, max_equity + 3]
if type_ == Equity:
success_sids = equity_sids
fail_sids = future_sids
else:
fail_sids = equity_sids
success_sids = future_sids
with tmp_asset_finder(equities=equities, futures=futures) as finder:
# Run twice to exercise caching.
lookup = getattr(finder, lookup_name)
for _ in range(2):
results = lookup(success_sids)
self.assertIsInstance(results, dict)
self.assertEqual(set(results.keys()), set(success_sids))
self.assertEqual(
valmap(int, results),
dict(zip(success_sids, success_sids)),
)
self.assertEqual(
{type_},
{type(asset) for asset in itervalues(results)},
)
with self.assertRaises(failure_type):
lookup(fail_sids)
with self.assertRaises(failure_type):
# Should fail if **any** of the assets are bad.
lookup([success_sids[0], fail_sids[0]])
def test_retrieve_all(self):
equities = make_simple_equity_info(
range(5),
start_date=pd.Timestamp('2014-01-01'),
end_date=pd.Timestamp('2015-01-01'),
)
max_equity = equities.index.max()
futures = make_commodity_future_info(
first_sid=max_equity + 1,
root_symbols=['CL'],
years=[2014],
)
with tmp_asset_finder(equities=equities, futures=futures) as finder:
all_sids = finder.sids
self.assertEqual(len(all_sids), len(equities) + len(futures))
queries = [
# Empty Query.
(),
# Only Equities.
tuple(equities.index[:2]),
# Only Futures.
tuple(futures.index[:3]),
# Mixed, all cache misses.
tuple(equities.index[2:]) + tuple(futures.index[3:]),
# Mixed, all cache hits.
tuple(equities.index[2:]) + tuple(futures.index[3:]),
# Everything.
all_sids,
all_sids,
]
for sids in queries:
equity_sids = [i for i in sids if i <= max_equity]
future_sids = [i for i in sids if i > max_equity]
results = finder.retrieve_all(sids)
self.assertEqual(sids, tuple(map(int, results)))
self.assertEqual(
[Equity for _ in equity_sids] +
[Future for _ in future_sids],
list(map(type, results)),
)
self.assertEqual(
(
list(equities.symbol.loc[equity_sids]) +
list(futures.symbol.loc[future_sids])
),
list(asset.symbol for asset in results),
)
@parameterized.expand([
(EquitiesNotFound, 'equity', 'equities'),
(FutureContractsNotFound, 'future contract', 'future contracts'),
(SidsNotFound, 'asset', 'assets'),
])
def test_error_message_plurality(self,
error_type,
singular,
plural):
try:
raise error_type(sids=[1])
except error_type as e:
self.assertEqual(
str(e),
"No {singular} found for sid: 1.".format(singular=singular)
)
try:
raise error_type(sids=[1, 2])
except error_type as e:
self.assertEqual(
str(e),
"No {plural} found for sids: [1, 2].".format(plural=plural)
)
class AssetFinderCachedEquitiesTestCase(AssetFinderTestCase):
def setUp(self):
self.env = TradingEnvironment(load=noop_load)
self.asset_finder_type = AssetFinderCachedEquities
class TestFutureChain(TestCase):
@classmethod
def setUpClass(cls):
metadata = {
0: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC')},
1: {
'root_symbol': 'CL',
'symbol': 'CLK06',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC')},
2: {
'symbol': 'CLQ06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-07-20', tz='UTC')},
3: {
'symbol': 'CLX06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-10-20', tz='UTC')}
}
env = TradingEnvironment(load=noop_load)
env.write_data(futures_data=metadata)
cls.asset_finder = env.asset_finder
@classmethod
def tearDownClass(cls):
del cls.asset_finder
def test_len(self):
""" Test the __len__ method of FutureChain.
"""
# Sids 0, 1, & 2 have started, 3 has not yet started, but all are in
# the chain
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(len(cl), 4)
# Sid 0 is still valid on its notice date.
cl = FutureChain(self.asset_finder, lambda: '2005-12-20', 'CL')
self.assertEqual(len(cl), 4)
# Sid 0 is now invalid, leaving Sids 1 & 2 valid (and 3 not started).
cl = FutureChain(self.asset_finder, lambda: '2005-12-21', 'CL')
self.assertEqual(len(cl), 3)
# Sid 3 has started, so 1, 2, & 3 are now valid.
cl = FutureChain(self.asset_finder, lambda: '2006-02-01', 'CL')
self.assertEqual(len(cl), 3)
# All contracts are no longer valid.
cl = FutureChain(self.asset_finder, lambda: '2006-09-21', 'CL')
self.assertEqual(len(cl), 0)
def test_getitem(self):
""" Test the __getitem__ method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(cl[0], 0)
self.assertEqual(cl[1], 1)
self.assertEqual(cl[2], 2)
cl = FutureChain(self.asset_finder, lambda: '2005-12-20', 'CL')
self.assertEqual(cl[0], 0)
cl = FutureChain(self.asset_finder, lambda: '2005-12-21', 'CL')
self.assertEqual(cl[0], 1)
cl = FutureChain(self.asset_finder, lambda: '2006-02-01', 'CL')
self.assertEqual(cl[-1], 3)
def test_iter(self):
""" Test the __iter__ method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
for i, contract in enumerate(cl):
self.assertEqual(contract, i)
# First contract is now invalid, so sids will be offset by one
cl = FutureChain(self.asset_finder, lambda: '2005-12-21', 'CL')
for i, contract in enumerate(cl):
self.assertEqual(contract, i + 1)
def test_root_symbols(self):
""" Test that different variations on root symbols are handled
as expected.
"""
# Make sure this successfully gets the chain for CL.
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
self.assertEqual(cl.root_symbol, 'CL')
# These root symbols don't exist, so RootSymbolNotFound should
# be raised immediately.
with self.assertRaises(RootSymbolNotFound):
FutureChain(self.asset_finder, lambda: '2005-12-01', 'CLZ')
with self.assertRaises(RootSymbolNotFound):
FutureChain(self.asset_finder, lambda: '2005-12-01', '')
def test_repr(self):
""" Test the __repr__ method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
cl_feb = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL',
as_of_date=pd.Timestamp('2006-02-01', tz='UTC'))
# The default chain should not include the as of date.
self.assertEqual(repr(cl), "FutureChain(root_symbol='CL')")
# An explicit as of date should show up in the repr.
self.assertEqual(
repr(cl_feb),
("FutureChain(root_symbol='CL', "
"as_of_date='2006-02-01 00:00:00+00:00')")
)
def test_as_of(self):
""" Test the as_of method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
# Test that the as_of_date is set correctly to the future
feb = pd.Timestamp('2006-02-01', tz='UTC')
cl_feb = cl.as_of(feb)
self.assertEqual(
cl_feb.as_of_date,
pd.Timestamp(feb, tz='UTC')
)
# Test that the as_of_date is set correctly to the past, with
# args of str, datetime.datetime, and pd.Timestamp.
feb_prev = pd.Timestamp('2005-02-01', tz='UTC')
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
feb_prev = pd.Timestamp(datetime(year=2005, month=2, day=1), tz='UTC')
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
feb_prev = pd.Timestamp('2005-02-01', tz='UTC')
cl_feb_prev = cl.as_of(feb_prev)
self.assertEqual(
cl_feb_prev.as_of_date,
pd.Timestamp(feb_prev, tz='UTC')
)
# Test that the as_of() method works with str args
feb_str = '2006-02-01'
cl_feb = cl.as_of(feb_str)
self.assertEqual(
cl_feb.as_of_date,
pd.Timestamp(feb, tz='UTC')
)
# The chain as of the current dt should always be the same as
# the default chain.
self.assertEqual(cl[0], cl.as_of(pd.Timestamp('2005-12-01'))[0])
def test_offset(self):
""" Test the offset method of FutureChain.
"""
cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
# Test that an offset forward sets as_of_date as expected
self.assertEqual(
cl.offset('3 days').as_of_date,
cl.as_of_date + pd.Timedelta(days=3)
)
# Test that an offset backward sets as_of_date as expected, with
# time delta given as str, datetime.timedelta, and pd.Timedelta.
self.assertEqual(
cl.offset('-1000 days').as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
self.assertEqual(
cl.offset(timedelta(days=-1000)).as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
self.assertEqual(
cl.offset(pd.Timedelta('-1000 days')).as_of_date,
cl.as_of_date + pd.Timedelta(days=-1000)
)
# An offset of zero should give the original chain.
self.assertEqual(cl[0], cl.offset(0)[0])
self.assertEqual(cl[0], cl.offset("0 days")[0])
# A string that doesn't represent a time delta should raise a
# ValueError.
with self.assertRaises(ValueError):
cl.offset("blah")
def test_cme_code_to_month(self):
codes = {
'F': 1, # January
'G': 2, # February
'H': 3, # March
'J': 4, # April
'K': 5, # May
'M': 6, # June
'N': 7, # July
'Q': 8, # August
'U': 9, # September
'V': 10, # October
'X': 11, # November
'Z': 12 # December
}
for key in codes:
self.assertEqual(codes[key], cme_code_to_month(key))
def test_month_to_cme_code(self):
codes = {
1: 'F', # January
2: 'G', # February
3: 'H', # March
4: 'J', # April
5: 'K', # May
6: 'M', # June
7: 'N', # July
8: 'Q', # August
9: 'U', # September
10: 'V', # October
11: 'X', # November
12: 'Z', # December
}
for key in codes:
self.assertEqual(codes[key], month_to_cme_code(key))
class TestAssetDBVersioning(TestCase):
def test_check_version(self):
env = TradingEnvironment(load=noop_load)
version_table = env.asset_finder.version_info
# This should not raise an error
check_version_info(version_table, ASSET_DB_VERSION)
# This should fail because the version is too low
with self.assertRaises(AssetDBVersionError):
check_version_info(version_table, ASSET_DB_VERSION - 1)
# This should fail because the version is too high
with self.assertRaises(AssetDBVersionError):
check_version_info(version_table, ASSET_DB_VERSION + 1)
def test_write_version(self):
env = TradingEnvironment(load=noop_load)
metadata = sa.MetaData(bind=env.engine)
version_table = _version_table_schema(metadata)
version_table.delete().execute()
# Assert that the version is not present in the table
self.assertIsNone(sa.select((version_table.c.version,)).scalar())
# This should fail because the table has no version info and is,
# therefore, considered v0
with self.assertRaises(AssetDBVersionError):
check_version_info(version_table, -2)
# This should not raise an error because the version has been written
write_version_info(version_table, -2)
check_version_info(version_table, -2)
# Assert that the version is in the table and correct
self.assertEqual(sa.select((version_table.c.version,)).scalar(), -2)
# Assert that trying to overwrite the version fails
with self.assertRaises(sa.exc.IntegrityError):
write_version_info(version_table, -3)
def test_finder_checks_version(self):
# Create an env and give it a bogus version number
env = TradingEnvironment(load=noop_load)
metadata = sa.MetaData(bind=env.engine)
version_table = _version_table_schema(metadata)
version_table.delete().execute()
write_version_info(version_table, -2)
check_version_info(version_table, -2)
# Assert that trying to build a finder with a bad db raises an error
with self.assertRaises(AssetDBVersionError):
AssetFinder(engine=env.engine)
# Change the version number of the db to the correct version
version_table.delete().execute()
write_version_info(version_table, ASSET_DB_VERSION)
check_version_info(version_table, ASSET_DB_VERSION)
# Now that the versions match, this Finder should succeed
AssetFinder(engine=env.engine)
def test_downgrade(self):
# Attempt to downgrade a current assets db all the way down to v0
env = TradingEnvironment(load=noop_load)
conn = env.engine.connect()
downgrade(env.engine, 0)
# Verify that the db version is now 0
metadata = sa.MetaData(conn)
metadata.reflect(bind=env.engine)
version_table = metadata.tables['version_info']
check_version_info(version_table, 0)
# Check some of the v1-to-v0 downgrades
self.assertTrue('futures_contracts' in metadata.tables)
self.assertTrue('version_info' in metadata.tables)
self.assertFalse('tick_size' in
metadata.tables['futures_contracts'].columns)
self.assertTrue('contract_multiplier' in
metadata.tables['futures_contracts'].columns)
def test_impossible_downgrade(self):
# Attempt to downgrade a current assets db to a
# higher-than-current version
env = TradingEnvironment(load=noop_load)
with self.assertRaises(AssetDBImpossibleDowngrade):
downgrade(env.engine, ASSET_DB_VERSION + 5)
| apache-2.0 |
smccaffrey/PIRT_ASU | tests/client_builds/PHY132_Fall2017/dueDates_V2.py | 2 | 3544 | import selenium
import getpass
import time
import sys
import logging as log
import pandas as pd
from selenium import webdriver as wbd
from selenium.webdriver.common.by import By
sys.path.append('/Users/smccaffrey/Desktop/BlackboardAssistant/core/')
#from automation import test_options as prelabs
from automation import Editor as prelabs
from automation import assignment_options as lab_reports
from automation import SideBar
from automation import authorization
from automation import SectionSelector
### Creates the browser instance in which all operations take place ###
#driver = wbd.Chrome('/Users/smccaffrey/Desktop/BlackboardAssistant/lib/chromedriver2_26')
driver = wbd.Chrome()
filename = '/Users/smccaffrey/Desktop/BlackboardAssistant/tests/client_builds/PHY132_Fall2017/PHY132_Fall2017_v2.csv'
p = 'PHY 132: University Physics Lab II (2017 Fall)-'
URL = 'https://myasucourses.asu.edu/webapps/portal/execute/tabs/tabAction?tab_tab_group_id=_1_1'
### Parsers excel workbook (must be .csv file) ###
def parser(filename):
df1 = pd.read_csv(filename, dtype=str, delimiter=',', header=None)
return df1
### Update Prelabs information ###
def updater(d, p, URL, arr, module1, module2, dryrun=True):
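# For every section listed in the parsed sheet, open its PRELABS page and
# update the due date/time for each of the 10 prelabs, then repeat for the
# lab-report assignments.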
i = 1
for i in range(1, len(arr[0])):
SectionSelector(d).find_section(module = p, section = arr[0][i], wait = 5)
SideBar(d).navigate_to(element = 'PRELABS', wait = 5)
n = 1
for n in range (1, 11):
prelabs(d).assignmentSelector(element = arr[n+2][0], wait = 5)
print("Editing SECTION: " + str(arr[0][i]) + " " + arr[n+2][0])
prelabs(d).editTestOptions(wait = 3)
prelabs(d).startRestrictCheck(state = False)
prelabs(d).endRestrictCheck(state = False)
prelabs(d).dueDate(date = arr[n+2][i])
prelabs(d).dueDateTime(time = arr[1][i])
prelabs(d).dueDateCheck(state = True)
"""
for x in range(0, 2):
prelabs(d).dueDateCheck(state = True)
for x in range(0, 2):
prelabs(d).dueDateCheck(state = True)
for x in range(0, 1):
prelabs(d).lateSubmissionCheck(state = True)
"""
pause = raw_input("Press <ENTER> to continue: ")
if not dryrun:
prelabs(d).submit(wait = 7)
prelabs(d).cancel(wait = 7)
d.find_element_by_link_text(module2).click()
time.sleep(3)
for n in range(1, 10):
lab_reports.assignmentSelector(driver = d, module = module2, assignment = arr[n+12][0])
print("Editing SECTION: " + str(arr[0][i]) + " " + arr[n+12][0])
time.sleep(5)
lab_reports.edit_test_options(d)
time.sleep(3)
lab_reports.start_restrict(d, False)
lab_reports.end_restrict(d, False)
lab_reports._dueDate(d, True)
lab_reports.dp_dueDate_date(d, arr[n+12][i])
lab_reports.tp_dueDate_time(d, arr[2][i])
#pause = raw_input("Press <ENTER> to continue: ")
lab_reports.cancel(d)
d.get(URL)
time.sleep(4)
### test function ###
def test_func(d, filename, dryrun=False):
arr = parser(filename)
if not dryrun:
authorization.login(driver = d, url = URL, wait = 10)
authorization.dual_factor(driver = d, wait = 15)
updater(d, p, URL, arr, module1 = 'PRELABS', module2 = 'Submit Lab Reports')
if __name__ == '__main__':
test_func(driver, filename)
| apache-2.0 |
quietcoolwu/myBuildingMachineLearningSystemsWithPython-second_edition | ch11/demo_pca.py | 25 | 4333 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
from matplotlib import pylab
import numpy as np
from sklearn import linear_model, decomposition
from sklearn import lda
logistic = linear_model.LogisticRegression()
from utils import CHART_DIR
np.random.seed(3)
x1 = np.arange(0, 10, .2)
x2 = x1 + np.random.normal(scale=1, size=len(x1))
def plot_simple_demo_1():
pylab.clf()
fig = pylab.figure(num=None, figsize=(10, 4))
pylab.subplot(121)
title = "Original feature space"
pylab.title(title)
pylab.xlabel("$X_1$")
pylab.ylabel("$X_2$")
x1 = np.arange(0, 10, .2)
x2 = x1 + np.random.normal(scale=1, size=len(x1))
good = (x1 > 5) | (x2 > 5)
bad = ~good
x1g = x1[good]
x2g = x2[good]
pylab.scatter(x1g, x2g, edgecolor="blue", facecolor="blue")
x1b = x1[bad]
x2b = x2[bad]
pylab.scatter(x1b, x2b, edgecolor="red", facecolor="white")
pylab.grid(True)
pylab.subplot(122)
X = np.c_[(x1, x2)]
pca = decomposition.PCA(n_components=1)
Xtrans = pca.fit_transform(X)
Xg = Xtrans[good]
Xb = Xtrans[bad]
pylab.scatter(
Xg[:, 0], np.zeros(len(Xg)), edgecolor="blue", facecolor="blue")
pylab.scatter(
Xb[:, 0], np.zeros(len(Xb)), edgecolor="red", facecolor="white")
title = "Transformed feature space"
pylab.title(title)
pylab.xlabel("$X'$")
fig.axes[1].get_yaxis().set_visible(False)
print(pca.explained_variance_ratio_)
pylab.grid(True)
pylab.autoscale(tight=True)
filename = "pca_demo_1.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_simple_demo_2():
pylab.clf()
fig = pylab.figure(num=None, figsize=(10, 4))
pylab.subplot(121)
title = "Original feature space"
pylab.title(title)
pylab.xlabel("$X_1$")
pylab.ylabel("$X_2$")
x1 = np.arange(0, 10, .2)
x2 = x1 + np.random.normal(scale=1, size=len(x1))
good = x1 > x2
bad = ~good
x1g = x1[good]
x2g = x2[good]
pylab.scatter(x1g, x2g, edgecolor="blue", facecolor="blue")
x1b = x1[bad]
x2b = x2[bad]
pylab.scatter(x1b, x2b, edgecolor="red", facecolor="white")
pylab.grid(True)
pylab.subplot(122)
X = np.c_[(x1, x2)]
pca = decomposition.PCA(n_components=1)
Xtrans = pca.fit_transform(X)
Xg = Xtrans[good]
Xb = Xtrans[bad]
pylab.scatter(
Xg[:, 0], np.zeros(len(Xg)), edgecolor="blue", facecolor="blue")
pylab.scatter(
Xb[:, 0], np.zeros(len(Xb)), edgecolor="red", facecolor="white")
title = "Transformed feature space"
pylab.title(title)
pylab.xlabel("$X'$")
fig.axes[1].get_yaxis().set_visible(False)
print(pca.explained_variance_ratio_)
pylab.grid(True)
pylab.autoscale(tight=True)
filename = "pca_demo_2.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_simple_demo_lda():
pylab.clf()
fig = pylab.figure(num=None, figsize=(10, 4))
pylab.subplot(121)
title = "Original feature space"
pylab.title(title)
pylab.xlabel("$X_1$")
pylab.ylabel("$X_2$")
good = x1 > x2
bad = ~good
x1g = x1[good]
x2g = x2[good]
pylab.scatter(x1g, x2g, edgecolor="blue", facecolor="blue")
x1b = x1[bad]
x2b = x2[bad]
pylab.scatter(x1b, x2b, edgecolor="red", facecolor="white")
pylab.grid(True)
pylab.subplot(122)
X = np.c_[(x1, x2)]
lda_inst = lda.LDA(n_components=1)
Xtrans = lda_inst.fit_transform(X, good)
Xg = Xtrans[good]
Xb = Xtrans[bad]
pylab.scatter(
Xg[:, 0], np.zeros(len(Xg)), edgecolor="blue", facecolor="blue")
pylab.scatter(
Xb[:, 0], np.zeros(len(Xb)), edgecolor="red", facecolor="white")
title = "Transformed feature space"
pylab.title(title)
pylab.xlabel("$X'$")
fig.axes[1].get_yaxis().set_visible(False)
pylab.grid(True)
pylab.autoscale(tight=True)
filename = "lda_demo.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
if __name__ == '__main__':
plot_simple_demo_1()
plot_simple_demo_2()
plot_simple_demo_lda()
| mit |
MoamerEncsConcordiaCa/tensorflow | tensorflow/examples/learn/iris.py | 35 | 1654 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
AnasGhrab/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
dpaiton/OpenPV | pv-core/python/pvtools/display.py | 2 | 2881 | from .readpvpfile import readpvpfile
#from .pv_object import PV_Object
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
#FILE NOT TESTED
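# interpret: accept either a path to a .pvp file or an already-loaded pvp
# data object, and return the loaded data.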
def interpret(arg):
if type(arg) is str:
arg = readpvpfile(arg)
#assert type(arg) is PV_Object
return arg
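# getActive: count the non-zero activations in each frame of a layer
# activity file (dense or sparse).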
def getActive(data):
numFrames = data["values"].shape[0]
actList = np.zeros((numFrames))
#Can only be layer activity
assert (data.header['filetype'] == 4 or data.header['filetype'] == 6)
for frame in range(numFrames):
allValues = data["values"]
#dense
if(data.header['filetype'] == 4):
actList[frame] = np.count_nonzero(allValues[frame, :, :, :])
else:
actList[frame] = allValues.getrow(frame).nnz
return actList
def getPercentActive(data):
total = data.header['nx'] * data.header['ny'] * data.header['nf']
return getActive(data) / float(total)
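# getError: per-frame L2 norm of the data, or of the difference between the
# data and each comparison pvp object passed in *args.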
def getError(data, *args):
assert data.header['filetype'] == 4
numDataFrames = data["values"].shape[0]
if args:
outList = []
for a in args:
assert type(a) == type(data)
assert a.header['filetype'] == 4
numAFrames = a["values"].shape[0]
errList = np.zeros(min(numDataFrames, numAFrames))
for frame in range(len(errList)):
errList[frame] = np.linalg.norm((data["values"][frame, :, :, :] - a["values"][frame, :, :, :]))
outList.append(errList)
if len(outList) == 1:
return outList[0]
return outList
else:
errList = np.zeros(numDataFrames)
for frame in range(numDataFrames):
errList[frame] = np.linalg.norm(data["values"][frame, :, :, :])
return errList
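# view: display one frame of a layer activity file, or every patch of a
# weight file, as images.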
def view(data, frame=0):
assert type(frame) is int
data = interpret(data)
if data.header['filetype'] == 4:
plt.imshow(data["values"][frame, :, :, :],
interpolation='nearest')
plt.axis('off')
plt.show()
if data.header['filetype'] == 5:
axes = np.ceil(np.sqrt(data.header['numpatches']))
for patch in range(data.header['numpatches']):
plt.subplot(axes,axes,patch+1)
plt.imshow(data["values"][frame, 0, patch, :, :, :],
interpolation='nearest')
plt.axis('off')
plt.show()
def showErrorPlot(image, *args):
image = interpret(image)
if args:
for arg in args:
plt.figure()
plt.plot(getError(image, interpret(arg)))
plt.show()
else:
plt.figure()
plt.plot(getError(image))
plt.show()
def showNumActivePlot(data):
data = interpret(data)
plt.figure()
plt.plot(getActive(data))
plt.show()
def showSparsityPlot(data):
data = interpret(data)
plt.figure()
plt.plot(getPercentActive(data))
plt.show()
| epl-1.0 |
mjm159/sunpy | sunpy/gui/ui/mainwindow/widgets/figure_canvas.py | 2 | 4292 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Plot widgets for the PlotMan
subclassed from the matplotlib FigureCanvasQTAgg
Author: Matt Earnshaw <[email protected]>
"""
import sunpy
import matplotlib
from PyQt4.QtGui import QSizePolicy
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg
from matplotlib.figure import Figure
class FigureCanvas(FigureCanvasQTAgg):
""" General plot widget, resizes to fit window """
def __init__(self, map_, parent=None):
self.parent = parent
self.map = map_
self.figure = Figure()
self.map.plot(figure=self.figure)
self.axes = self.figure.gca()
# Code duplication from plotman.py!
if self.map.norm() is not None:
# If pre-normalised, get inital clips from the matplotlib norm
self.vmin = self.map.norm().vmin
self.vmax = self.map.norm().vmax
else:
# Otherwise, get initial clips from the map data directly.
self.vmin = self.map.min()
self.vmax = self.map.max()
self.scaling = "Linear" # Shouldn't be a default.
# Matplotlib kwargs
self.params = {
"cmap": self.map.cmap,
"norm": self.map.norm(),
}
FigureCanvasQTAgg.__init__(self, self.figure)
FigureCanvasQTAgg.setSizePolicy(self,
QSizePolicy.Expanding,
QSizePolicy.Expanding)
FigureCanvasQTAgg.updateGeometry(self)
def get_cmap(self, cmapname, gamma=None):
""" Given a sunpy or matplotlib cmap name, returns the cmap. """
mpl_cmaps = sorted(m for m in matplotlib.pyplot.cm.datad
if not m.endswith("_r"))
if cmapname in mpl_cmaps:
return matplotlib.cm.get_cmap(cmapname)
else:
return sunpy.cm.get_cmap(cmapname)
def get_norm(self):
""" Return a matplotlib Normalize according to clip values and scale type """
if self.vmin < self.vmax:
if self.scaling == "Linear": # This is not robust!
return matplotlib.colors.Normalize(vmin=self.vmin,
vmax=self.vmax)
elif self.scaling == "Logarithmic":
if self.vmin <= 0:
# Cannot log scale 0 or negative data
self.window().colorErrorLabel.setText("Cannot log scale zero or negative data!")
return self.params["norm"]
else:
return matplotlib.colors.LogNorm(vmin=self.vmin,
vmax=self.vmax)
else:
self.window().colorErrorLabel.setText("Min clip value must not exceed max clip value!")
return self.params["norm"]
def update_figure(self, cmapname=None, vmin=None, vmax=None, scaling=None):
# Destroy any previous figures
#matplotlib.pyplot.close()
self.figure.clear()
# Clear error messages
self.window().colorErrorLabel.clear()
if cmapname is not None:
self.params.update({"cmap": self.get_cmap(cmapname)})
elif vmax is not None:
self.vmax = vmax
self.params.update({"norm": self.get_norm()})
elif vmin is not None:
self.vmin = vmin
self.params.update({"norm": self.get_norm()})
elif scaling is not None:
self.scaling = scaling
self.params.update({"norm": self.get_norm()})
#self.figure = Figure()
self.map.plot(figure=self.figure, **self.params)
self.axes = self.figure.gca()
self.resize_figure()
def reset_figure(self):
self.figure = Figure()
self.map.plot(figure=self.figure)
self.axes = self.figure.gca()
self.resize_figure()
self.window().initialize_color_options()
def resize_figure(self):
self.updateGeometry()
self.resize(1, 1) # It works, OK?
self.draw()
@property
def cmap(self):
return self.params["cmap"]
@property
def norm(self):
return self.params["norm"]
| bsd-2-clause |
bikong2/scikit-learn | examples/applications/topics_extraction_with_nmf_lda.py | 133 | 3517 | """
========================================================================================
Topics extraction with Non-Negative Matrix Factorization And Latent Dirichlet Allocation
========================================================================================
This is an example of applying Non Negative Matrix Factorization
and Latent Dirichlet Allocation on a corpus of documents and
extract additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# Chyi-Kwei Yau <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
# use tf-idf feature for NMF model
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(data_samples)
# use tf feature for LDA model
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("\nFitting LDA models with tf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online', learning_offset=50.,
random_state=0)
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
| bsd-3-clause |
mmottahedi/neuralnilm_prototype | scripts/e183.py | 2 | 6808 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from neuralnilm.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for BLSTM
e110
* Back to Uniform(5) for BLSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single BLSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd BLSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using BLSTM not BBLSTM
e151
* Max pooling
171
lower learning rate
172
even lower learning rate
173
slightly higher learning rate!
175
same as 174 but with skip prob = 0, and LSTM not BLSTM, and only 4000 epochs
176
new cost function
177
another new cost func (this one avoids NaNs)
skip prob 0.7
10x higher learning rate
178
refactored cost func (functionally equiv to 177)
0.1x learning rate
e180
* mse
e181
* back to scaled cost
* different architecture:
- convd1 at input (2x)
- then 3 LSTM layers, each with a 2x conv in between
- no diff input
"""
# def scaled_cost(x, t):
# raw_cost = (x - t) ** 2
# energy_per_seq = t.sum(axis=1)
# energy_per_batch = energy_per_seq.sum(axis=1)
# energy_per_batch = energy_per_batch.reshape((-1, 1))
# normaliser = energy_per_seq / energy_per_batch
# cost = raw_cost.mean(axis=1) * (1 - normaliser)
# return cost.mean()
from theano.ifelse import ifelse
import theano.tensor as T
THRESHOLD = 0
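# scaled_cost: average the squared error separately over above-threshold ("on")
# and below-threshold ("off") target values, then take the mean of the two,
# giving both regions equal weight regardless of how many samples fall in each.
# ifelse() substitutes 0 when a mask selects no elements, avoiding NaNs.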
def scaled_cost(x, t):
sq_error = (x - t) ** 2
def mask_and_mean_sq_error(mask):
masked_sq_error = sq_error[mask.nonzero()]
mean = masked_sq_error.mean()
mean = ifelse(T.isnan(mean), 0.0, mean)
return mean
above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)
below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)
return (above_thresh_mean + below_thresh_mean) / 2.0
def exp_a(name):
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=None,#[200, 100, 200, 2500, 2400],
on_power_thresholds=[5, 5, 5, 5, 5],
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1520,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=25,
subsample_target=2,
input_padding=2,
include_diff=False
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=5000,
loss_function=scaled_cost,
updates=partial(nesterov_momentum, learning_rate=.00001, clip_range=(-1, 1)),
layers_config=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 1,
'filter_length': 3,
'stride': 1,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1 # pool over the time axis
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Uniform(25)
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
raise
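# NOTE: the bare `raise` above re-raises immediately, so the two debug lines
# below never run; comment it out to drop into ipdb instead.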
print("EXCEPTION:", exception)
import ipdb; ipdb.set_trace()
if __name__ == "__main__":
main()
| mit |
walterreade/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 150 | 3651 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_parallel():
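# Fitting with n_jobs=2 must give exactly the same centers and labels as the
# single-job fit.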
ms1 = MeanShift(n_jobs=2)
ms1.fit(X)
ms2 = MeanShift()
ms2.fit(X)
assert_array_equal(ms1.cluster_centers_, ms2.cluster_centers_)
assert_array_equal(ms1.labels_, ms2.labels_)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
# init away from the data, crash with a sensible warning
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
# Non-regression: before fit, there should be not fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
| bsd-3-clause |
carthach/essentia | src/examples/python/musicbricks-tutorials/1-stft_analsynth.py | 2 | 1197 |
import essentia
import essentia.streaming as es
# import matplotlib for plotting
import matplotlib.pyplot as plt
import numpy as np
import sys
# algorithm parameters
framesize = 1024
hopsize = 256
# loop over all frames
audioout = np.array(0)
counter = 0
import matplotlib.pylab as plt
# input and output files
import os.path
tutorial_dir = os.path.dirname(os.path.realpath(__file__))
inputFilename = os.path.join(tutorial_dir, 'singing-female.wav')
outputFilename = os.path.join(tutorial_dir, 'singing-female-out-stft.wav')
out = np.array(0)
loader = es.MonoLoader(filename = inputFilename, sampleRate = 44100)
pool = essentia.Pool()
fcut = es.FrameCutter(frameSize = framesize, hopSize = hopsize, startFromZero = False);
w = es.Windowing(type = "hann");
fft = es.FFT(size = framesize);
ifft = es.IFFT(size = framesize);
overl = es.OverlapAdd (frameSize = framesize, hopSize = hopsize, gain = 1./framesize );
awrite = es.MonoWriter (filename = outputFilename, sampleRate = 44100);
loader.audio >> fcut.signal
fcut.frame >> w.frame
w.frame >> fft.frame
fft.fft >> ifft.fft
ifft.frame >> overl.frame
overl.signal >> awrite.audio
overl.signal >> (pool, 'audio')
essentia.run(loader)
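# Optional visual check (an illustrative sketch, not part of the original
# tutorial): the resynthesized signal was also routed into the pool under the
# key 'audio' by the connection above, so the matplotlib import at the top can
# be used to eyeball the overlap-add output. flatten() is used because the
# exact shape of pool['audio'] depends on how the output stream is tokenized.
resynth = np.asarray(pool['audio']).flatten()
plt.plot(resynth[:4096])
plt.title("First 4096 samples of the STFT resynthesis")
plt.xlabel("sample")
plt.show()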
| agpl-3.0 |
Haunter17/MIR_SU17 | exp8/exp8d.py | 1 | 7766 | import numpy as np
import tensorflow as tf
import h5py
from sklearn.preprocessing import OneHotEncoder
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
import scipy.io
# Functions for initializing neural nets parameters
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float64)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape, dtype=tf.float64)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, [1, 1, 1, 1], 'VALID')
# Download data from .mat file into numpy array
print('==> Experiment 8d')
filepath = '/scratch/ttanpras/exp8a_d7_1s.mat'
print('==> Loading data from {}'.format(filepath))
f = h5py.File(filepath)
data_train = np.array(f.get('trainingFeatures'))
data_val = np.array(f.get('validationFeatures'))
X_test = np.array(f.get('window_testFeatures'))
y_test = np.array(f.get('window_testLabels'))
del f
print('==> Data sizes:',data_train.shape, data_val.shape, X_test.shape, y_test.shape)
# Transform labels into on-hot encoding form
enc = OneHotEncoder()
y_test = enc.fit_transform(y_test.copy()).astype(int).toarray()
'''
NN config parameters
'''
sub_window_size = 32
num_features = 169*sub_window_size
num_frames = 32
hidden_layer_size = 64
num_classes = 71
print("Number of features:", num_features)
print("Number of songs:",num_classes)
# Reshape input features
X_train = np.reshape(data_train,(-1, num_features))
X_val = np.reshape(data_val,(-1, num_features))
print("Input sizes:", X_train.shape, X_val.shape)
y_train = []
y_val = []
# Add Labels
for label in range(num_classes):
for sampleCount in range(X_train.shape[0]//num_classes):
y_train.append([label])
for sampleCount in range(X_val.shape[0]//num_classes):
y_val.append([label])
X_train = np.concatenate((X_train, y_train), axis=1)
X_val = np.concatenate((X_val, y_val), axis=1)
# Shuffle
np.random.shuffle(X_train)
np.random.shuffle(X_val)
# Separate coefficients and labels
y_train = X_train[:, -1].reshape(-1, 1)
X_train = X_train[:, :-1]
y_val = X_val[:, -1].reshape(-1, 1)
X_val = X_val[:, :-1]
print('==> Data sizes:',X_train.shape, y_train.shape,X_val.shape, y_val.shape)
y_train = enc.fit_transform(y_train.copy()).astype(int).toarray()
y_val = enc.fit_transform(y_val.copy()).astype(int).toarray()
plotx = []
ploty_train = []
ploty_val = []
# Set-up NN layers
x = tf.placeholder(tf.float64, [None, num_features])
W1 = weight_variable([num_features, hidden_layer_size])
b1 = bias_variable([hidden_layer_size])
OpW1 = tf.placeholder(tf.float64, [num_features, hidden_layer_size])
Opb1 = tf.placeholder(tf.float64, [hidden_layer_size])
# Hidden layer activation function: ReLU
h = tf.matmul(x, W1) + b1
h1 = tf.nn.relu(h)
W2 = weight_variable([hidden_layer_size, num_classes])
b2 = bias_variable([num_classes])
OpW2 = tf.placeholder(tf.float64, [hidden_layer_size, num_classes])
Opb2 = tf.placeholder(tf.float64, [num_classes])
# Softmax layer (Output), dtype = float64
y = tf.matmul(h1, W2) + b2
# NN desired value (labels)
y_ = tf.placeholder(tf.float64, [None, num_classes])
# Loss function
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
sess = tf.InteractiveSession()
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))
sess.run(tf.initialize_all_variables())
# Training
numTrainingVec = len(X_train)
batchSize = 500
numEpochs = 1500
bestValErr = 10000
bestValEpoch = 0
startTime = time.time()
for epoch in range(numEpochs):
for i in range(0,numTrainingVec,batchSize):
# Batch Data
batchEndPoint = min(i+batchSize, numTrainingVec)
trainBatchData = X_train[i:batchEndPoint]
trainBatchLabel = y_train[i:batchEndPoint]
train_step.run(feed_dict={x: trainBatchData, y_: trainBatchLabel})
# Print accuracy
if epoch%5 == 0 or epoch == numEpochs-1:
plotx.append(epoch)
train_error = cross_entropy.eval(feed_dict={x:trainBatchData, y_: trainBatchLabel})
val_error = cross_entropy.eval(feed_dict={x:X_val, y_: y_val})
train_acc = accuracy.eval(feed_dict={x:trainBatchData, y_: trainBatchLabel})
val_acc = accuracy.eval(feed_dict={x:X_val, y_: y_val})
ploty_train.append(train_error)
ploty_val.append(val_error)
print("epoch: %d, train acc %g, val acc %g, train error %g, val error %g"%(epoch, train_acc, val_acc, train_error, val_error))
        if val_error < bestValErr:
            bestValErr = val_error
            bestValEpoch = epoch
            # snapshot the current best weights as numpy arrays so they can be
            # restored after training (merely aliasing the tf.Variables would not)
            OpW1 = W1.eval()
            Opb1 = b1.eval()
            OpW2 = W2.eval()
            Opb2 = b2.eval()
endTime = time.time()
print("Elapse Time:", endTime - startTime)
print("Best validation error: %g at epoch %d"%(bestValErr, bestValEpoch))
# Restore best model for early stopping
sess.run(W1.assign(OpW1))
sess.run(b1.assign(Opb1))
sess.run(W2.assign(OpW2))
sess.run(b2.assign(Opb2))
saveweight = {}
saveweight['W1'] = np.array(W1.eval())
saveweight['b1'] = np.array(b1.eval())
scipy.io.savemat('exp8d_1500ep_weight.mat',saveweight)
print('==> Generating error plot...')
errfig = plt.figure()
trainErrPlot = errfig.add_subplot(111)
trainErrPlot.set_xlabel('Number of Epochs')
trainErrPlot.set_ylabel('Cross-Entropy Error')
trainErrPlot.set_title('Error vs Number of Epochs')
trainErrPlot.scatter(plotx, ploty_train)
valErrPlot = errfig.add_subplot(111)
valErrPlot.scatter(plotx, ploty_val)
errfig.savefig('exp8d_64_1500ep.png')
'''
GENERATING REPRESENTATION OF NOISY FILES
'''
namelist = ['orig','comp5','comp10','str5','str10','ampSat_(-15)','ampSat_(-10)','ampSat_(-5)', \
'ampSat_(5)','ampSat_(10)','ampSat_(15)','pitchShift_(-1)','pitchShift_(-0.5)', \
'pitchShift_(0.5)','pitchShift_(1)','rev_dkw','rev_gal','rev_shan0','rev_shan1', \
'rev_gen','crowd-15','crowd-10','crowd-5','crowd0','crowd5','crowd10','crowd15', \
'crowd100','rest-15','rest-10','rest-5','rest0','rest5','rest10','rest15', \
'rest100','AWGN-15','AWGN-10','AWGN-5','AWGN0','AWGN5','AWGN10','AWGN15', 'AWGN100']
outdir = '/scratch/ttanpras/taylorswift_noisy_processed/'
repDict = {}
repRowDict = {}
repZeroDict = {}
# Loop over each CQT files, not shuffled
for count in range(len(namelist)):
name = namelist[count]
filename = outdir + name + '.mat'
cqt = scipy.io.loadmat(filename)['Q']
cqt = np.transpose(np.array(cqt))
# Group into windows of 32 without overlapping
# Discard any leftover frames
num_windows = cqt.shape[0] // 32
cqt = cqt[:32*num_windows]
X = np.reshape(cqt,(num_windows, num_features))
# Feed window through model (Only 1 layer of weight w/o non-linearity)
rep = h.eval(feed_dict={x:X})
# Threshold output with median of neuron
medRow = np.median(rep,axis=0)
repRow = np.array([ [ int(rep[w][i]>medRow[i]) for i in range(hidden_layer_size)] for w in range(num_windows)])
# Threshold output with 0
repZero = np.array([ [ int(rep[w][i]>0) for i in range(hidden_layer_size)] for w in range(num_windows)])
# Put the output representation into a dictionary
repDict['n'+str(count)] = rep
repRowDict['r'+str(count)] = repRow
repZeroDict['z'+str(count)] = repZero
scipy.io.savemat('exp8d_64_repNon.mat',repDict)
scipy.io.savemat('exp8d_64_repRow.mat',repRowDict)
scipy.io.savemat('exp8d_64_repZero.mat',repZeroDict)
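# Note (sketch): the element-wise list comprehensions above have vectorized
# NumPy equivalents that give identical results, e.g.
#   repRow  = (rep > np.median(rep, axis=0)).astype(int)
#   repZero = (rep > 0).astype(int)
# and the saved representations can later be reloaded for inspection with
#   scipy.io.loadmat('exp8d_64_repNon.mat')   # keys 'n0', 'n1', ...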
| mit |
ColaLightOriginal/GraphV | Source/Main_Window_Class.py | 1 | 25510 | from gi.repository import Gtk, Gdk, GdkPixbuf, GObject, Pango
from graph_tool.all import *
from Database_Window import *
from About import *
import numpy as np
from numpy.random import random
import matplotlib
import shutil
import os
from operator import itemgetter
class MainWindow(Gtk.Window):
graph = None
def __init__(self, graph):
Gtk.Window.__init__(self, title = "MainWindow")
self.graph = graph
self.set_border_width(10)
self.set_position(Gtk.WindowPosition.CENTER_ALWAYS)
grid = Gtk.Grid()
self.add(grid)
        self.pathsIter = 0
        self.validPaths = []
screen = Gdk.Screen.get_default()
buttonWidth = 3
#Row1 - Buttons
self.buttonOpen = Gtk.Button.new_from_stock(Gtk.STOCK_CONNECT)
self.buttonOpen.connect("clicked", self.openDatabase)
self.buttonOpen.set_border_width(buttonWidth)
grid.attach(self.buttonOpen,1,0,1,1)
# self.buttonOpenFile = Gtk.Button.new_from_stock(Gtk.STOCK_OPEN)
# self.buttonOpenFile.connect("clicked", self.onFileClicked)
# self.buttonOpenFile.set_border_width(buttonWidth)
# grid.attach(self.buttonOpenFile,2,0,1,1)
self.buttonSave = Gtk.Button.new_from_stock(Gtk.STOCK_SAVE)
self.buttonSave.connect("clicked", self.onFolderClicked)
self.buttonSave.set_border_width(buttonWidth)
grid.attach(self.buttonSave,3,0,1,1)
self.buttonAbout = Gtk.Button.new_from_stock(Gtk.STOCK_ABOUT)
self.buttonAbout.connect("clicked", self.onAboutClicked)
self.buttonAbout.set_border_width(buttonWidth)
grid.attach(self.buttonAbout,2,0,1,1)
self.logo = Gtk.Image()
self.logo.set_from_file('./Resources/logo.png')
grid.attach(self.logo,2,2,1,1)
self.labelTitle = Gtk.Label("GraphV")
self.labelTitle.modify_font(Pango.FontDescription("Ubuntu 50"))
self.labelTitle.set_xalign(0.5)
self.labelTitle.set_size_request(400,100)
grid.attach(self.labelTitle,0,2,3,1)
#Row3
self.labelStatistics = Gtk.Label("Statistics Menu")
self.labelStatistics.modify_font(Pango.FontDescription("Ubuntu 16"))
self.labelStatistics.set_xalign(0.5)
self.labelStatistics.set_size_request(100,75)
grid.attach(self.labelStatistics,0,3,4,1)
#Row4
self.labelShow = Gtk.Label("Show")
self.labelShow.modify_font(Pango.FontDescription("Ubuntu 12"))
self.labelShow.set_xalign(0.5)
self.labelShow.set_yalign(0.5)
self.labelShow.set_size_request(50,35)
grid.attach(self.labelShow,0,4,1,1)
self.combo = Gtk.ComboBoxText()
self.combo.append("0", "")
self.combo.append("1", "Vertices")
self.combo.append("2", "Edges")
self.combo.connect("changed", self.comboShow)
grid.attach(self.combo,1,4,1,1)
#Row5
self.labelFind = Gtk.Label("Find Employee")
self.labelFind.modify_font(Pango.FontDescription("Ubuntu 12"))
self.labelFind.set_xalign(0.5)
self.labelFind.set_yalign(0.5)
self.labelFind.set_size_request(50,50)
grid.attach(self.labelFind,0,5,1,1)
self.textBoxFind = Gtk.Entry()
grid.attach(self.textBoxFind,1,5,1,1)
self.labelFind = Gtk.Label("Deep")
self.labelFind.modify_font(Pango.FontDescription("Ubuntu 12"))
self.labelFind.set_xalign(0.5)
self.labelFind.set_yalign(0.5)
self.labelFind.set_size_request(50,50)
grid.attach(self.labelFind,2,5,1,1)
self.comboDeep = Gtk.ComboBoxText()
self.comboDeep.append("0", "")
self.comboDeep.append("1", "0")
self.comboDeep.append("2", "1")
self.comboDeep.append("3", "2")
self.comboDeep.append("4", "3")
self.comboDeep.connect("changed", self.printFind)
grid.attach(self.comboDeep,3,5,1,1)
#Row6
self.labelFindSpec = Gtk.Label("Find Specialization")
self.labelFindSpec.modify_font(Pango.FontDescription("Ubuntu 12"))
self.labelFindSpec.set_xalign(0.5)
self.labelFindSpec.set_yalign(0.5)
self.labelFindSpec.set_size_request(50,50)
grid.attach(self.labelFindSpec,0,6,1,1)
self.textBoxFindSpec = Gtk.Entry()
self.textBoxFindSpec.connect("key_press_event", self.printSpecs)
grid.attach(self.textBoxFindSpec,1,6,1,1)
#Row7
self.comboChooseAlg = Gtk.ComboBoxText()
self.comboChooseAlg.append("0", "Path searching:")
# self.comboChooseAlg.append("1", "BFS")
self.comboChooseAlg.append("2", "A*")
grid.attach(self.comboChooseAlg,0,7,1,1)
self.comboChooseAlg.set_active(0)
self.textBoxFrom = Gtk.Entry()
self.textBoxFrom.connect("key_press_event", self.chooseSearchAlg)
grid.attach(self.textBoxFrom,1,7,1,1)
self.labelTo = Gtk.Label("To")
self.labelTo.modify_font(Pango.FontDescription("Ubuntu 12"))
self.labelTo.set_xalign(0.5)
self.labelTo.set_yalign(0.5)
self.labelTo.set_size_request(50,50)
grid.attach(self.labelTo,2,7,1,1)
self.textBoxTo = Gtk.Entry()
self.textBoxTo.connect("key_press_event", self.chooseSearchAlg)
grid.attach(self.textBoxTo,3,7,1,1)
#Row8
self.labelTitle = Gtk.Label("Graphic Menu")
self.labelTitle.modify_font(Pango.FontDescription("Ubuntu 16"))
self.labelTitle.set_xalign(0.5)
self.labelTitle.set_size_request(100,75)
grid.attach(self.labelTitle,0,8,4,1)
#Row9
self.labelTitleAdds = Gtk.Label("Graph of:")
self.labelTitleAdds.modify_font(Pango.FontDescription("Ubuntu 12"))
self.labelTitleAdds.set_xalign(0.5)
self.labelTitleAdds.set_size_request(50,50)
grid.attach(self.labelTitleAdds,0,9,1,1)
self.comboChooseAdd = Gtk.ComboBoxText()
self.comboChooseAdd.append("0", "All")
self.comboChooseAdd.append("1", "Specialization")
self.comboChooseAdd.append("2", "Magazine")
grid.attach(self.comboChooseAdd,1,9,1,1)
self.comboChooseAdd.connect("changed", self.setActive)
self.comboChooseAdd.set_active(0)
self.labelTitleAddsPick = Gtk.Label("Spec/Mag:")
self.labelTitleAddsPick.modify_font(Pango.FontDescription("Ubuntu 12"))
self.labelTitleAddsPick.set_xalign(0.5)
self.labelTitleAddsPick.set_size_request(50,50)
grid.attach(self.labelTitleAddsPick,2,9,1,1)
self.textBoxAddons = Gtk.Entry()
grid.attach(self.textBoxAddons,3,9,1,1)
self.textBoxAddons.set_sensitive(False)
#Row10
self.labelExportPng = Gtk.Label("Export PNG image")
self.labelExportPng.modify_font(Pango.FontDescription("Ubuntu 12"))
self.labelExportPng.set_xalign(0)
self.labelExportPng.set_yalign(0.5)
self.labelExportPng.set_size_request(50,50)
grid.attach(self.labelExportPng,0,10,1,1)
self.comboLayout = Gtk.ComboBoxText()
self.comboLayout.append("0", "")
self.comboLayout.append("1", "Arf")
self.comboLayout.append("2", "Fruchterman Reingold")
grid.attach(self.comboLayout,1,10,1,1)
self.labelFileName = Gtk.Label("File name:")
self.labelFileName.modify_font(Pango.FontDescription("Ubuntu 12"))
self.labelFileName.set_xalign(0.5)
self.labelFileName.set_yalign(0.5)
self.labelFileName.set_size_request(50,50)
grid.attach(self.labelFileName,2,10,1,1)
self.textBoxFilename = Gtk.Entry()
self.textBoxFilename.connect("key_press_event", self.generateImage)
grid.attach(self.textBoxFilename,3,10,1,1)
#Row11
self.labelInteractive = Gtk.Label("Interactive window")
self.labelInteractive.modify_font(Pango.FontDescription("Ubuntu 12"))
self.labelInteractive.set_xalign(0)
self.labelInteractive.set_yalign(0.5)
self.labelInteractive.set_size_request(50,50)
grid.attach(self.labelInteractive,0,11,1,1)
self.labelInteractive.set_sensitive(False)
self.buttonInteractive = Gtk.Button(label="Execute")
#self.buttonInteractive.connect("button_press_event", self.findEmp)
self.buttonInteractive.set_border_width(buttonWidth)
self.buttonInteractive.connect("button_press_event", self.generateInteractive)
grid.attach(self.buttonInteractive,1,11,1,1)
self.buttonInteractive.set_sensitive(False)
#Row12
self.labelWidget = Gtk.Label("Widget window")
self.labelWidget.modify_font(Pango.FontDescription("Ubuntu 12"))
self.labelWidget.set_xalign(0)
self.labelWidget.set_yalign(0.5)
self.labelWidget.set_size_request(50,50)
grid.attach(self.labelWidget,0,12,1,1)
self.buttonWidget = Gtk.Button(label="Execute")
self.buttonWidget.set_border_width(buttonWidth)
self.buttonWidget.connect("button_press_event", self.generateWidget)
grid.attach(self.buttonWidget,1,12,1,1)
# self.buttonClear = Gtk.Button(label="Clear table")
# self.buttonClear.set_border_width(buttonWidth)
# grid.attach(self.buttonClear,3,11,1,1)
#Column5 Statistic List
self.software_liststore = Gtk.ListStore(int,str,str,int,str,str)
self.current_filter_language = None
self.language_filter = self.software_liststore.filter_new()
self.language_filter.set_visible_func(self.language_filter_func)
self.treeview = Gtk.TreeView.new_with_model(self.software_liststore)
self.treesorted = Gtk.TreeModelSort(self.software_liststore)
self.column = [None] * 6
for i in xrange(6):
self.column[i] = 1
        for i, column_title in enumerate(["Num/D", "Source vertex", "Destination vertex", "Value", "Section", "Specialization"]):
renderer = Gtk.CellRendererText()
self.column[i] = Gtk.TreeViewColumn(column_title, renderer, text=i)
self.column[i].set_sort_column_id(i)
self.treeview.append_column(self.column[i])
self.scrollable_treelist = Gtk.ScrolledWindow()
self.scrollable_treelist.set_vexpand(True)
self.scrollable_treelist.add(self.treeview)
self.scrollable_treelist.set_border_width(buttonWidth)
self.scrollable_treelist.set_size_request(700,300)
grid.attach(self.scrollable_treelist,4,2,1,10)
self.connect("key_press_event", self.showPaths)
def showPaths(self,widget,event):
if event.keyval == Gdk.KEY_F2 or event.keyval == Gdk.KEY_F1:
if event.keyval == Gdk.KEY_F2:
self.pathsIter += 1
if self.pathsIter >= len(self.validPaths):
self.pathsIter = len(self.validPaths)-1
elif event.keyval == Gdk.KEY_F1:
self.pathsIter -= 1
if self.pathsIter < 0:
self.pathsIter = 0
self.software_liststore.clear()
jterator = 0
for i in self.column:
i.set_visible(True)
try:
for j in self.validPaths[self.pathsIter][:-1]:
#Same specializations
specials = ""
for specs in self.graph.vpropSpecializations[self.validPaths[self.pathsIter][jterator]]:
if specs in self.graph.vpropSpecializations[self.validPaths[self.pathsIter][jterator+1]]:
specials += specs + " "
                    #Insert data to the liststore
self.software_liststore.append(
[jterator,
self.graph.vpropSurname[self.validPaths[self.pathsIter][jterator]],
self.graph.vpropSurname[self.validPaths[self.pathsIter][jterator+1]],
int(self.graph.epropCounter[self.graph.g.edge( self.validPaths[self.pathsIter][jterator], self.validPaths[self.pathsIter][jterator+1] ) ]),
specials])
jterator += 1
except:
pass
def chooseSearchAlg(self, widget, event):
if event.keyval == Gdk.KEY_Return:
if self.comboChooseAlg.get_active() == 2:
self.findA()
elif self.comboChooseAlg.get_active() == 1:
self.findFromTo()
def findA(self):
source = self.textBoxFrom.get_text()
destination = self.textBoxTo.get_text()
dist, pred,destinationV,touch_v,touch_e, target = self.graph.findPathA(source,destination)
self.software_liststore.clear()
ecolor = self.graph.g.new_edge_property("string")
ewidth = self.graph.g.new_edge_property("double")
ewidth.a = 1
for e in self.graph.g.edges():
ecolor[e] = "#3465a4" if touch_e[e] else "#d3d7cf"
v = target
path = []
while v != self.graph.g.vertex(destinationV):
p = self.graph.g.vertex(pred[v])
for e in v.out_edges():
if e.target() == p:
ecolor[e] = "#a40000"
ewidth[e] = 3
path.append(e)
v = p
self.software_liststore.clear()
for i in self.column:
i.set_visible(True)
for i, j in enumerate(path):
specials = ""
for specs in self.graph.vpropSpecializations[j.source()]:
if specs in self.graph.vpropSpecializations[j.target()]:
specials += specs + " "
self.software_liststore.append([i,
self.graph.vpropSurname[j.source()],
self.graph.vpropSurname[j.target()],
int(self.graph.epropCounter[self.graph.g.edge( j.source(), j.target() )]),
self.graph.vpropMail[j.source()] + " " + self.graph.vpropMail[j.target()],
specials
])
graph_draw(self.graph.g, pos=self.graph.GlobalPos, output_size=(300, 300), vertex_fill_color=touch_v,
vcmap=matplotlib.cm.binary, edge_color=ecolor,
edge_pen_width=ewidth, output="astar-delaunay.pdf")
def generateImage(self,widget,event):
if event.keyval == Gdk.KEY_Return:
fileName = self.textBoxFilename.get_text()
fileDir = self.onFolderClicked1()
if self.comboLayout.get_active() == 1:
self.graph.drawGraphArfLayout(fileName)
elif self.comboLayout.get_active() == 2:
self.graph.drawGraphFruchtermanReingoldLayout(fileName)
shutil.move(os.getcwd()+"/"+fileName+".png",fileDir)
def generateInteractive(self,widget,event):
self.graph.interactiveWindow()
def generateWidget(self,widget,event):
self.graph.graphWidget(self.comboChooseAdd.get_active(), self.textBoxAddons.get_text() )
def findFromTo(self):
source = self.textBoxFrom.get_text()
destination = self.textBoxTo.get_text()
self.software_liststore.clear()
self.validPaths = []
self.validPaths = self.graph.findPath(source, destination)
if bool(self.validPaths) == False:
return
for i in self.column:
i.set_visible(True)
else:
#try:
self.pathsIter = 0
jterator = 0
for j in self.validPaths[self.pathsIter][:-1]:
#Same specializations
specials = ""
for specs in self.graph.vpropSpecializations[self.validPaths[self.pathsIter][jterator]]:
if specs in self.graph.vpropSpecializations[self.validPaths[self.pathsIter][jterator+1]]:
specials += specs + " "
#Insert data to liststore
self.software_liststore.append(
[jterator,
self.graph.vpropSurname[self.validPaths[self.pathsIter][jterator]],
self.graph.vpropSurname[self.validPaths[self.pathsIter][jterator+1]],
int(self.graph.epropCounter[self.graph.g.edge( self.validPaths[self.pathsIter][jterator], self.validPaths[self.pathsIter][jterator+1] ) ]),
specials
])
jterator += 1
def comboShow(self,widget):
#Set sortable
j = 0
for i in self.column:
i.set_sort_column_id(j)
j += 1
#Show all the vertices of the graph
if self.combo.get_active() == 1:
self.software_liststore.clear()
for i in self.column:
i.set_visible(True)
self.column[2].set_visible(False)
for v in self.graph.g.vertices():
#Print specializations
specials = ""
for j in self.graph.vpropSpecializations[v]:
specials += j + " "
self.software_liststore.append([
int(v),
self.graph.vpropSurname[self.graph.g.vertex(v)],
None,
int(self.graph.vpropNeighbours[v]),
self.graph.vpropMail[self.graph.g.vertex(v)],
specials
])
#self.software_liststore.set_sort_column_id(2, 1)
#self.combo.set_active(0)
#Show all edges of the graph
if self.combo.get_active() == 2:
self.software_liststore.clear()
for i in self.column:
i.set_visible(True)
for iterator,e in enumerate(self.graph.g.edges()):
#Print the same specializations
specials = ""
for j in self.graph.vpropSpecializations[self.graph.g.vertex(e.source())]:
if j in self.graph.vpropSpecializations[self.graph.g.vertex(e.target())]:
specials += j + " "
self.software_liststore.append([
iterator,
self.graph.vpropSurname[self.graph.g.vertex(e.source())],
self.graph.vpropSurname[self.graph.g.vertex(e.target())],
int(self.graph.epropCounter[self.graph.g.edge( e.source(), e.target())]),
self.graph.vpropMail[self.graph.g.vertex(e.source())] + " " + self.graph.vpropMail[self.graph.g.vertex(e.target())],
specials
])
#self.combo.set_active(0)
def printSpecs(self,widget,event):
if event.keyval == Gdk.KEY_Return:
self.software_liststore.clear()
for i in self.column:
i.set_visible(True)
self.column[3].set_visible(False)
spec = self.textBoxFindSpec.get_text()
iterator = 0
for v in self.graph.g.vertices():
if spec in self.graph.vpropSpecializations[v]:
self.software_liststore.append([iterator,self.graph.vpropSurname[v],None,None, self.graph.vpropMail[v],spec])
iterator += 1
for e in self.graph.g.edges():
if (spec in self.graph.vpropSpecializations[e.source()]) and (spec in self.graph.vpropSpecializations[e.target()]):
self.software_liststore.append([iterator,self.graph.vpropSurname[e.source()],self.graph.vpropSurname[e.target()],None,self.graph.vpropMail[e.source()] + " " + self.graph.vpropMail[e.target()],spec,])
iterator += 1
def printFind(self,widget):
iterator = self.comboDeep.get_active_id()
self.software_liststore.clear()
find = None
for v in self.graph.g.vertices():
#Find vertex
if self.graph.vpropSurname[v] == self.textBoxFind.get_text():
find = v
break
        if find is None:
            self.software_liststore.append([0, "Node not found", "", 0, "", ""])
            return
elif int(iterator)-1 == 0:
for i in self.column:
i.set_visible(True)
self.column[0].set_visible(False)
self.column[3].set_visible(False)
specials = ""
for j in self.graph.vpropSpecializations[v]:
specials += j + " "
self.software_liststore.append([None,self.graph.vpropSurname[v], "Node has been found", int(self.graph.vpropNeighbours[v]), self.graph.vpropMail[v], specials])
#Find neighbours
visited = []
selected = []
tmp = []
self.column[0].set_visible(True)
self.column[3].set_visible(True)
selected.append(int(find))
for i in xrange(0, int(iterator)-1):
for v in self.graph.g.vertices():
if (int(v) in selected) and (int(v) not in visited):
for e in v.out_neighbours():
if int(e) not in visited:
#Same specializations
specials = ""
for j in self.graph.vpropSpecializations[self.graph.g.vertex(v)]:
if j in self.graph.vpropSpecializations[self.graph.g.vertex(e)]:
specials += j + " "
tmp.append(int(e))
self.software_liststore.append([
i+1,
self.graph.vpropSurname[v],
self.graph.vpropSurname[e],
int(self.graph.epropCounter[self.graph.g.edge( v, e)]),
self.graph.vpropMail[v] + " " + self.graph.vpropMail[e],
specials
])
visited.append(int(v))
for t in tmp:
selected.append(t)
tmp = []
def language_filter_func(self, model, iter, data):
if self.current_filter_language is None or self.current_filter_language == "None":
return True
else:
return model[iter][0] == self.current_filter_language
def onFileClicked(self, widget):
dialog = Gtk.FileChooserDialog("Please choose a file", self,
Gtk.FileChooserAction.OPEN,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
self.addFilters(dialog)
response = dialog.run()
if response == Gtk.ResponseType.OK:
print("Open clicked")
print("File selected: " + dialog.get_filename())
elif response == Gtk.ResponseType.CANCEL:
print("Cancel clicked")
dialog.destroy()
def addFilters(self, dialog):
filter_text = Gtk.FileFilter()
filter_text.set_name("Text files")
filter_text.add_mime_type("text/plain")
dialog.add_filter(filter_text)
filter_py = Gtk.FileFilter()
filter_py.set_name("Python files")
filter_py.add_mime_type("text/x-python")
dialog.add_filter(filter_py)
filter_any = Gtk.FileFilter()
filter_any.set_name("Any files")
filter_any.add_pattern("*")
dialog.add_filter(filter_any)
def onFolderClicked(self, widget):
dialog = Gtk.FileChooserDialog("Please choose a folder", self,
Gtk.FileChooserAction.SELECT_FOLDER,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
"Select", Gtk.ResponseType.OK))
dialog.set_default_size(800, 400)
response = dialog.run()
if response == Gtk.ResponseType.OK:
fileDir = dialog.get_filename()
elif response == Gtk.ResponseType.CANCEL:
print("Cancel clicked")
dialog.destroy()
try:
            self.graph.g.save("my_graph.graphml")
            shutil.move(os.getcwd() + "/my_graph.graphml", fileDir)
        except:
            print "Could not save the graph (no folder selected or write failed)"
def onFolderClicked1(self):
dialog = Gtk.FileChooserDialog("Please choose a folder", self,
Gtk.FileChooserAction.SELECT_FOLDER,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
"Select", Gtk.ResponseType.OK))
dialog.set_default_size(800, 400)
response = dialog.run()
if response == Gtk.ResponseType.OK:
tmp = dialog.get_filename()
dialog.destroy()
return tmp
elif response == Gtk.ResponseType.CANCEL:
print("Cancel clicked")
dialog.destroy()
def onAboutClicked(self, widget):
win = About()
win.connect("delete_event", Gtk.main_quit)
win.show_all()
Gtk.main()
def h(self, v, target, pos):
return np.sqrt(sum((pos[v].a - pos[target].a) ** 2))
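    # h() above is the Euclidean distance between the layout positions of two
    # vertices; it is intended as the distance heuristic for graph_tool's A*
    # search (see findA above, which delegates to self.graph.findPathA), and it
    # assumes `pos` is a 2D vertex position property map.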
def setActive(self,widget):
if self.comboChooseAdd.get_active() == 1 or self.comboChooseAdd.get_active() == 2:
self.textBoxAddons.set_sensitive(True)
else:
self.textBoxAddons.set_text("")
self.textBoxAddons.set_sensitive(False)
def openDatabase(self,widget):
win = DatabaseWindow(self.graph)
win.connect("delete_event", Gtk.main_quit)
win.show_all()
Gtk.main()
| unlicense |
OSSOS/MOP | src/ossos/planning/layout40.py | 2 | 28896 | import math
import operator
import os
import string
import sys
from math import sqrt
import Polygon.IO
import ephem
import numpy as np
from astropy.io import votable
from matplotlib import rcParams
from matplotlib.patches import Ellipse
from matplotlib.patches import Rectangle
from matplotlib.pyplot import figure, savefig
from ossos import mpc
from ossos import orbfit
from ossos import storage
from . import invariable
from . import megacam
from . import mpcread
from . import usnoB1
USER = os.getenv('HOME', '/Users/michele/')
# USER = '/Users/jjk/'
def plot_line(axes, fname, ltype):
"""plot the ecliptic plane line on the given axes."""
x = np.genfromtxt(fname, unpack=True)
axes.plot(x[0], x[1], ltype)
MPCORB_FILE = os.path.join(USER, 'MPCORB-Distant.dat')
# MPCORB_FILE = os.path.join(USER, 'Dropbox/Develop/code/MPCORB.DAT')
L7MODEL = 'vos:OSSOS/CFEPS/L7SyntheticModel-v09.txt'
# L7MODEL = '/Users/kavelaarsj/Dropbox/Research/KuiperBelt/OSSOS/L7SyntheticModel-v09.txt'
REAL_KBO_AST_DIR = '/Users/jjk/Dropbox/Research/KuiperBelt/OSSOS/dbaseclone/ast'
# REAL_KBO_AST_DIR = os.path.join(USER, 'Dropbox/OSSOS/measure3/ossin/')
PLOT_FIELD_EPOCH = 'Oct16' # Oct14.00 ==> '0' days since the New Moon on Oct14
# TODO the .00 is appended when this variable is used as a keyword that needs that .00 this is bad.
DISCOVERY_NEW_MOON = 'Nov15' # this is the date that the RA/DEC in blocks corresponds to.
PLOT_USNO_STARS = True
PLOT_MEGACAM_ARCHIVE_FIELDS = True
PLOT_SYNTHETIC_KBOS = True
PLOT_SYNTHETIC_KBO_TRAILS = False
PLOT_REAL_KBOS = True and os.access(REAL_KBO_AST_DIR, os.F_OK)
PLOT_FIELD_LAYOUT = True
PLOT_MPCORB = False and os.access(MPCORB_FILE, os.F_OK)
LABEL_FIELDS = True
LABEL_PLANETS = True
# # EmulateApJ columnwidth=245.26 pts
fig_width_pt = 246.0 * 3.0
inches_per_pt = 1.0 / 72.27
golden_mean = (sqrt(5.) - 1.0) / 2.0
fig_width = fig_width_pt * inches_per_pt
fig_height = fig_width * golden_mean * 1.5
fig_size = [fig_width, fig_height]
params = {'backend': 'pdf',
'axes.labelsize': 12,
'text.fontsize': 12,
'legend.fontsize': 10,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'text.usetex': False,
'font.serif': 'Times',
'font.family': 'serif',
'image.aspect': 'equal',
'figure.subplot.left': 0.2,
'figure.subplot.bottom': 0.15,
'figure.figsize': fig_size}
rcParams.update(params)
etHeader = """<?xml version = "1.0"?>
<!DOCTYPE ASTRO SYSTEM "http://vizier.u-strasbg.fr/xml/astrores.dtd">
<ASTRO ID="v0.8" xmlns:ASTRO="http://vizier.u-strasbg.fr/doc/astrores.htx">
<TABLE ID="Table">
<NAME>Ephemeris</NAME>
<TITLE>Ephemeris for CFHT QSO</TITLE>
<!-- Definition of each field -->
<FIELD name="DATE_UTC" datatype="A" width="19" format="YYYY-MM-DD hh:mm:ss">
<DESCRIPTION>UTC Date</DESCRIPTION>
</FIELD>
<FIELD name="RA_J2000" datatype="A" width="11" unit="h" format="RAh:RAm:RAs">
<DESCRIPTION>Right ascension of target</DESCRIPTION>
</FIELD>
<FIELD name="DEC_J2000" datatype="A" width="11" unit="deg" format="DEd:DEm:DEs">
<DESCRIPTION>Declination of target</DESCRIPTION>
</FIELD>
<!-- Data table -->
<DATA><CSV headlines="4" colsep="|">
<![CDATA[
DATE_UTC |RA_J2000 |DEC_J2000 |
YYYY-MM-DD hh:mm:ss|hh:mm:ss.ss|+dd:mm:ss.s|
1234567890123456789|12345678901|12345678901|
-------------------|-----------|-----------|
"""
header = """<?xml version = "1.0"?>
<!DOCTYPE ASTRO SYSTEM "http://vizier.u-strasbg.fr/xml/astrores.dtd">
<ASTRO ID="v0.8" xmlns:ASTRO="http://vizier.u-strasbg.fr/doc/astrores.htx">
<TABLE ID="Table">
<NAME>Fixed Targets</NAME>
<TITLE>Fixed Targets for CFHT QSO</TITLE>
<!-- Definition of each field -->
<FIELD name="NAME" datatype="A" width="20">
<DESCRIPTION>Name of target</DESCRIPTION>
</FIELD>
<FIELD name="RA" ref="" datatype="A" width="11" unit=""h:m:s"">
<DESCRIPTION>Right ascension of target</DESCRIPTION>
</FIELD>
<FIELD name="DEC" ref="" datatype="A" width="11" unit=""d:m:s"">
<DESCRIPTION>Declination of target</DESCRIPTION>
</FIELD>
<FIELD name="EPOCH" datatype="F" width="6">
<DESCRIPTION>Epoch of coordinates</DESCRIPTION>
</FIELD>
<FIELD name="POINT" datatype="A" width="5">
<DESCRIPTION>Pointing name</DESCRIPTION>
</FIELD>
<!-- Data table -->
<DATA><CSV headlines="4" colsep="|"><![CDATA[
NAME |RA |DEC |EPOCH |POINT|
|hh:mm:ss.ss|+dd:mm:ss.s| | |
12345678901234567890|12345678901|12345678901|123456|12345|
--------------------|-----------|-----------|------|-----|
"""
# spring13
# blocks = {#'13AE': {"RA": "14:15:28.89", "DEC": "-12:32:28.4"}, # E+0+0: image 1616681, ccd21 on April 9
# '13AO': {"RA": "15:58:01.35", "DEC": "-12:19:54.2"}, # O+0+0: image 1625346, ccd21 on May 8
# '14AN': {'RA': "15:30:00.00", "DEC": "-11:00:00.0"},
# '14AM': {'RA': "15:30:00.00", "DEC": "-12:20:00.0"},
# '14AS': {'RA': "15:12:00.00", "DEC": "-17:40:00.0"}}
# fall13
# blocks = {'13BL': {'RA': "00:54:00.00", "DEC": "+03:50:00.00"}} # ,
# '13AE': {"RA": "14:15:28.89", "DEC": "-12:32:28.4"}, # E+0+0: image 1616681, ccd21 on April 9
# '13AO': {"RA": "15:58:01.35", "DEC": "-12:19:54.2"}, # O+0+0: image 1625346, ccd21 on May 8
# '13BH': {'RA': "01:30:00.00", "DEC": "+13:00:00.00"},
# '14AM': {'RA': "15:30:00.00", "DEC": "-12:20:00.0"}
# }
## this position is derived from the +0+0 field for 13B observed on November 1st 2013
# blocks = {'14BH': {'RA': "01:28:32.32", "DEC": "+12:51:06.10"}}
# fall 2015
blocks = {'15BD': {'RA': "03:15:00.00", "DEC": "+16:30:00.00"}}
# spring15
# blocks = {'15AP': {'RA': "13:30:00", "DEC": "-07:45:00"}}
# blocks = {'14AM': {'RA': "15:30:00.00", "DEC": "-12:20:00.0"}} # the centre when set in May 2014.
# will then define a 15AM to work properly.
# blocks = {'15AM': {'RA': "15:35:00.00", "DEC": "-12:10:00.0"}} # the centre for discovery in May 2015.
# blocks = {'15AM': {'RA': "15:35:00.00", "DEC": "-12:10:00.0"}} # the centre for discovery in May 2015.
# blocks = {
# '14BH': {'RA': "01:28:32.32", "DEC": "+12:51:06.10"},
# '13BL': {'RA': "00:54:00.00", "DEC": "+03:50:00.00"},
# '15BS': {'RA': "00:30:00.00", "DEC": "+04:45:00.00"}}
blocks = {
# E+0+0: image 1616681, ccd21 on April 9. E block discovery triplets are April 4,9,few 19
# '13AE': {"RA": "14:15:28.89", "DEC": "-12:32:28.4"},
# '13AO': {"RA": "15:58:01.35", "DEC": "-12:19:54.2"}, # O+0+0: image 1625346, ccd21 on May 8. O block are
# May 7,8.
# '13BL': {'RA': "00:54:00.00", "DEC": "+03:50:00.00"}, # 13B blocks are at their opposition locations
# '14BH': {'RA': "01:30:00.00", "DEC": "+13:00:00.00"}, # due to bad weather, discovery wasn't until 2014, so 14
# '15AP': {'RA': "13:30:00.00", "DEC": "-7:45:00.00"}, # on-plane
# '15AM': {'RA': "15:35:00.00", "DEC": "-12:10:00.0"} # positioned for its 2015 discovery opposition.
# '15BS': {'RA': "00:30:00.00", "DEC": "+05:00:00.00"}, # rejected: dec "-02:45:00.00"
'15BD': {'RA': "03:15:00.00", "DEC": "+16:30:00.00"}
}
newMoons = {
# 'Feb13': "2013/02/10 10:00:00",
# 'Mar13': "2013/03/11 10:00:00",
# 'Apr13': "2013/04/10 10:00:00",
# 'May13': "2013/05/09 10:00:00",
# 'Jun13': "2013/06/08 10:00:00",
# 'Jul13': "2013/07/08 10:00:00",
# 'Aug13': "2013/08/06 10:00:00",
# 'Sep13': '2013/09/05 10:00:00',
# 'Oct13': '2013/10/04 10:00:00',
# 'Nov13': '2013/11/03 10:00:00',
# 'Dec13': '2013/12/02 10:00:00',
# 'Jan14': '2014/01/01 10:00:00',
# 'Feb14': '2014/01/31 10:00:00',
# 'Mar14': '2014/03/28 10:00:00',
# 'Apr14': '2014/04/01 10:00:00',
# 'May14': '2014/05/28 10:00:00',
# 'Jun14': '2014/06/26 10:00:00',
# 'Jul14': '2014/07/26 10:00:00',
# 'Aug14': "2014/08/25 10:00:00",
# 'Sep14': '2014/09/24 10:00:00',
# 'Oct14': '2014/10/23 10:00:00',
# 'Nov14': '2014/11/22 10:00:00',
# 'Dec14': '2014/12/22 10:00:00',
# 'Jan15': '2015/01/20 10:00:00',
# 'Feb15': '2015/02/18 10:00:00',
# 'Mar15': '2015/03/19 10:00:00',
# 'Apr15': '2015/04/18 10:00:00',
# 'May15': '2015/05/17 10:00:00',
# 'Jun15': '2015/06/16 10:00:00',
# 'Jul15': '2015/07/15 10:00:00',
# 'Aug15': '2015/08/14 10:00:00',
# 'Sep15': '2015/09/12 10:00:00',
# 'Oct15': '2015/10/12 10:00:00',
'Nov15': '2015/11/11 10:00:00',
# 'Dec15': '2015/12/11 10:00:00',
# 'Jan16': '2016/01/09 10:00:00',
# 'Feb16': '2016/03/08 10:00:00',
# 'Mar16': '2016/03/09 10:00:00',
# 'Apr16': '2016/04/08 10:00:00',
# 'May16': '2016/05/07 10:00:00',
# 'Jun16': '2016/06/05 10:00:00',
# 'Jul16': '2016/07/05 10:00:00',
# 'Aug16': '2016/08/03 10:00:00',
'Sep16': '2016/09/01 10:00:00',
'Oct16': '2016/10/01 10:00:00',
# 'Nov16': '2016/11/01 10:00:00',
# 'Dec16': '2016/12/01 10:00:00',
# 'Jan17': '2017/01/01 10:00:00',
# 'Feb17': '2017/02/01 10:00:00',
}
xgrid = {'2014': [-3, -2, -1, 0, 1, 2, 3],
'2014r': [-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5],
'astr': [1, 2, 3, 4],
'40ccd': [2, 1, 0, -1, -2]}
# 2013
# 'astr':[-5.5,-4.5,-3.5,-2.5,-1.5,-0.5,0.5,1.5,2.5,3.5,4.5,5.5]}
ygrid = {'2014': [-1, 0, 1],
'astr': [-2.5, -1.5, -0.5, 0.5, 1.5],
'2014r': [-1.5, -0.5, 0.5, 1.5],
'40ccd': [-1.5, -0.5, 0.5, 1.5]}
# 2013
# 'astr': [-2.2,-1.2, -0.2, 0.8, 1.8],
block_centre = ephem.Ecliptic(0, 0)
field_centre = ephem.Ecliptic(0, 0)
## foffset is the offset (in RA and DEC) between fields
field_offset = math.radians(1.00)
# which year, for the x/y offset grids, are we using.
year = '40ccd'
# the 'astr' year indicates astrometric grid, this should have more overlap so reduce the field_offset for those.
if year == 'astr':
field_offset *= 0.75
## camera_dimen is the size of the field.
## this is the 40-CCD layout. The width is set to exclude one of the
## ears; dimen is really the height.
pixscale = 0.184
detsec_ccd09 = ((4161, 2114), (14318, 9707))
detsec_ccd10 = ((6279, 4232), (14317, 9706))
detsec_ccd36 = ((2048, 1), (14318, 9707))
detsec_ccd37 = ((23219, 21172), (14309, 9698))
detsec_ccd00 = ((4160, 2113), (19351, 14740))
detsec_ccd17 = ((21097, 19050), (14309, 9698))
detsec_ccd35 = ((16934, 18981), (11, 4622))
print(detsec_ccd09[0][0] - detsec_ccd10[0][1], detsec_ccd09[0][1] - detsec_ccd36[0][0])
camera_width = (max(detsec_ccd37[0]) - min(detsec_ccd09[0]) + 1) * pixscale / 3600.0
camera_height = (max(detsec_ccd00[1]) - min(detsec_ccd35[1]) + 1) * pixscale / 3600.0
camera_width_40 = (max(detsec_ccd37[0]) - min(detsec_ccd36[0]) + 1) * pixscale / 3600.0
camera_width_36 = (max(detsec_ccd17[0]) - min(detsec_ccd09[0]) + 1) * pixscale / 3600.0
print(camera_width, camera_width_40, camera_width_36, camera_height)
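# The widths/heights above convert mosaic DETSEC pixel extents into degrees at
# the 0.184 arcsec/pixel plate scale; camera_width_40 appears to span the full
# 40-CCD mosaic (ear CCDs 36/37 included) while camera_width_36 spans only the
# central block bounded by CCDs 09 and 17.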
years = {"2014": {"ra_off": ephem.hours("00:00:00"),
"dec_off": ephem.hours("00:00:00"),
"fill": False,
"facecolor": 'k',
"alpha": 0.5,
"color": 'b'},
"40ccd": {"ra_off": ephem.hours("00:00:00"),
"dec_off": ephem.hours("00:00:00"),
"fill": False,
"facecolor": 'k',
"alpha": 0.5,
"color": 'b'},
"2014r": {"ra_off": ephem.hours("00:05:00"),
"dec_off": ephem.degrees("00:18:00"),
"alpha": 0.5,
"fill": False,
"facecolor": 'k',
"color": 'g'},
'astr': {"ra_off": ephem.hours("00:00:00"),
"dec_off": ephem.degrees("-00:12:00"),
"alpha": 0.05,
"fill": True,
'facecolor': 'k',
"color": 'none'},
}
# coverage is going to contain the polygons that describe our individual fields.
coverage = []
# ras/decs will hold the centers of the search fields, in degrees.
ras = []
decs = []
fix = open('Fall14.xml', 'w')
fix.write(header)
# build the pointings that will be used for the discovery fields
# This is a bit tricky as we keep the heliocentric longitude constant while moving in RA
# and get latitude coverage by changing the DEC.
# Compute the coverage of the discovery fields.
field_names = []
field_polygon = None
for block in list(blocks.keys()):
ra_start = ephem.hours(blocks[block]["RA"]) + years[year]["ra_off"]
dec_start = ephem.degrees(blocks[block]["DEC"]) + years[year]["dec_off"]
height = math.radians(camera_height)
idy = 0
for dx in xgrid[year]:
idx = int(dx)
for dy in ygrid[year]:
idy = int(math.floor(dy))
decc = ephem.degrees(dec_start + dx * height / 2.0)
this_decc = decc + dy * height
width = 1.007 * math.radians(camera_width / math.cos(this_decc))
## set to a -ve instead of +ve for the 'fall'
rac = ephem.hours(ra_start + dx * width)
dec = math.degrees(this_decc)
ra = math.degrees(rac)
print("{} {} {}".format(rac, dec, math.degrees(width)))
field_name = "%s%+d%+d" % (block, idx, idy)
field_names.append(field_name)
decs.append(np.radians(dec))
ras.append(np.radians(ra))
xcen = ra
ycen = dec
fix.write('%20s|%10s|%10s|2000.0| 1|\n' % (field_name,
ephem.hours(math.radians(ra)),
ephem.degrees(math.radians(dec))))
dimenw = math.degrees(width)
dimenh = math.degrees(height)
this_point_polygon = Polygon.Polygon((
(xcen - dimenw / 2.0, ycen - dimenh / 2.0),
(xcen - dimenw / 2.0, ycen + dimenh / 2.0),
(xcen + dimenw / 2.0, ycen + dimenh / 2.0),
(xcen + dimenw / 2.0, ycen - dimenh / 2.0),
(xcen - dimenw / 2.0, ycen - dimenh / 2.0)))
field_polygon = field_polygon is None and this_point_polygon or this_point_polygon | field_polygon
fix.write("""]]</CSV></DATA>
</TABLE>
</ASTRO>
""")
fix.close()
field_polygon.simplify()
print("Field Area: {} degrees squared".format(field_polygon.area()))
print("Field Centre: {} ".format(field_polygon.center()))
(ra_min, ra_max, dec_min, dec_max) = field_polygon.boundingBox()
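# Besides the summary statistics printed above, field_polygon is used below for
# point-in-polygon membership tests, e.g.
#   field_polygon.isInside(ra_deg, dec_deg)
# when deciding which synthetic/real KBOs land on the discovery pointings
# (ra_deg/dec_deg here are just placeholder names for coordinates in degrees).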
if year == "astr":
    ## we don't do trailing for the astrometric grid, or make any plots...
sys.exit(0)
ras = np.array(ras)
decs = np.array(decs)
ra_cen = math.degrees(ras.mean())
dec_cen = math.degrees(decs.mean())
# ra_cen = 15.0
# dec_cen = 5.0
# width = 245 - 205
# height = -8 + 25
# ra_cen = 180.0
# dec_cen = 0.0
# width = 360
# height = 70
# ra_cen = 45.0
# dec_cen = 17.5
### These values are the half width/height of the field.
width = 24
height = 12
## lets make a plot of the field selections.
fig = figure()
ax = fig.add_subplot(111)
# ax.set_aspect('equal')
# xylims = [250, 195, -17, -4] # all of A semester
# xylims = [27, 3, -6, 17] # H, L and low S blocks
# xylims = [26, 3, 0, 16] # H, L and high S blocks
xylims = [60, 40, 12, 21] # D block
ax.set_xlim(xylims[0], xylims[1]) # ra_cen + width, ra_cen - width)
ax.set_ylim(xylims[2], xylims[3]) # dec_cen - height, dec_cen + height)
# ax.set_ylim(-30,30)
ax.set_xlabel('RA (deg)')
ax.set_ylabel('DE (deg)')
ax.grid()
## plot the galactic plane line ..
plot_line(ax, 'eplane.radec', 'b-')
plot_line(ax, 'gplane.radec', 'g-')
ax.text(20, 8.5, 'ecliptic', fontdict={'color': 'b'})
# plot the invariant plane
lon = np.arange(-2 * math.pi, 2 * math.pi, 0.5 / 57)
lat = 0 * lon
(lat, lon) = invariable.trevonc(lat, lon)
ec = [ephem.Ecliptic(x, y) for (x, y) in np.array((lon, lat)).transpose()]
eq = [ephem.Equatorial(coord) for coord in ec]
ax.plot([math.degrees(coord.ra) for coord in eq],
[math.degrees(coord.dec) for coord in eq],
'-k',
lw=1,
alpha=0.7)
ax.text(25, 7.5, 'invariant', fontdict={'color': 'k'})
(cc_lat, cc_lon) = invariable.trevonc(lat, lon, chiangchoi_plane=True)
ec = [ephem.Ecliptic(x, y) for (x, y) in np.array((cc_lon, cc_lat)).transpose()]
eq = [ephem.Equatorial(coord) for coord in ec]
ax.plot([math.degrees(coord.ra) for coord in eq],
[math.degrees(coord.dec) for coord in eq],
'.r',
lw=1,
alpha=0.7)
ax.text(25, 6, 'Chiang_Choi', fontdict={'color': 'r'})
## build a list of Synthetic KBOs that will be in the discovery fields.
print("LOADING SYNTHETIC MODEL KBOS FROM: {}".format(L7MODEL))
ra = []
dec = []
kbos = []
lines = storage.open_vos_or_local(L7MODEL).read().split('\n')
# Look for synthetic KBOs that are in the field on this date.
discovery_date = ephem.date(newMoons[DISCOVERY_NEW_MOON])
plot_date = ephem.date(newMoons[PLOT_FIELD_EPOCH])
for line in lines:
if len(line) == 0 or line[0] == '#': # skip initial column descriptors and the final blank line
continue
kbo = ephem.EllipticalBody()
values = line.split()
kbo._a = float(values[0])
kbo._e = float(values[1])
kbo._inc = float(values[2])
kbo._Om = float(values[3])
kbo._om = float(values[4])
kbo._M = float(values[5])
kbo._H = float(values[6])
epoch = ephem.date(2453157.50000 - ephem.julian_date(0))
kbo._epoch_M = epoch
kbo._epoch = epoch
kbo.name = values[8]
kbo.compute(discovery_date)
kbo_ra = math.degrees(kbo.ra)
kbo_dec = math.degrees(kbo.dec)
## keep a list of KBOs that are in the discovery pointings
if field_polygon.isInside(math.degrees(float(kbo.ra)), math.degrees(float(kbo.dec))):
### only keep objects that are brighter than limit
if kbo.mag < 25.0:
kbos.append(kbo)
if PLOT_SYNTHETIC_KBOS:
kbo.compute(plot_date)
ax.scatter(math.degrees(float(kbo.ra)), math.degrees(float(kbo.dec)), c='k', marker='o', s=2, alpha=0.8)
print("{} KBOs found in coverage on {}".format(len(kbos), discovery_date))
## Now we work out how far to move the fields at different lunations
seps = {}
dates = {}
## build a list of dates that we will need field locations for.
for month in newMoons:
if month != PLOT_FIELD_EPOCH:
continue
for night in range(-9, 9):
epoch = "%s.%s" % (month, string.zfill(night, 2))
dates[epoch] = ephem.date(ephem.date(newMoons[month]) + night)
seps[epoch] = {'dra': 0, 'ddec': 0}
print("COMPUTING NOMINAL MOTIONS USING SYNTHETIC MODEL KBOS FROM: {}".format(L7MODEL))
## Compute the typical motion of KBOs that were in the discovery field.
for kbo in kbos:
kbo.compute(discovery_date)
ra = kbo.ra
dec = kbo.dec
for epoch in dates:
date = dates[epoch]
kbo.compute(date)
seps[epoch]['dra'] += kbo.ra - ra
seps[epoch]['ddec'] += kbo.dec - dec
## plot source locations at the start
## middle and end of semester
if PLOT_SYNTHETIC_KBO_TRAILS:
print("PLOTTING TRAILS USING SYNTHETIC MODEL KBOS FROM: {}".format(L7MODEL))
colours = {'Sep14': 'g', 'Oct14': 'b', 'Nov14': 'r'}
alpha = {'Sep14': 0.3, 'Oct14': 0.7, 'Nov14': 0.3}
zorder = {'Sep14': 1, 'Oct14': 5, 'Nov14': 2}
for month in ['Sep14', 'Oct14', 'Nov14']:
ra = []
dec = []
date = ephem.date(newMoons[month])
for kbo in kbos:
kbo.compute(date)
ra.append(math.degrees(kbo.ra))
dec.append(math.degrees(kbo.dec))
ax.plot(ra, dec, colours[month] + '.', alpha=alpha[month], zorder=zorder[month])
# compute the mean of the separation between the location at discovery and the location at epoch.
for month in seps:
seps[month]['dra'] /= float(len(kbos))
seps[month]['ddec'] /= float(len(kbos))
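# (sketch) seps[*] now holds mean total displacements since discovery, in
# radians; an approximate sky rate for a given epoch could be reported as e.g.
#   rate_arcsec_per_hr = math.degrees(seps[epoch]['dra']) * 3600.0 / \
#                        (24.0 * (dates[epoch] - discovery_date))
# (this ignores the cos(dec) foreshortening of the RA term).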
sorted_epochs = sorted(iter(dates.items()), key=operator.itemgetter(1))
camera_width = 0.983 * camera_width
camera_height = 0.983 * camera_height
print("CREATING ET XML FILES USING BASE POINTING AND NOMINAL MOTION RATES")
for idx in range(len(ras)):
name = field_names[idx]
f = file('%s.xml' % name, 'w')
f.write(etHeader)
for epoch in sorted_epochs:
epoch = epoch[0]
date = dates[epoch]
ra = ras[idx] + seps[epoch]['dra']
dec = decs[idx] + seps[epoch]['ddec']
tdate = date.tuple()
zf = string.zfill
sdate = "%4s-%2s-%2s %2s:%2s:%2s" % (
tdate[0], zf(tdate[1], 2), zf(tdate[2], 2), zf(tdate[3], 2), zf(tdate[4], 2), zf(int(tdate[5]), 2))
f.write('%19s|%11s|%11s|\n' % (sdate, ephem.hours(ra), ephem.degrees(dec)))
if epoch == PLOT_FIELD_EPOCH + ".00" and PLOT_FIELD_LAYOUT:
ax.add_artist(
Rectangle(xy=(math.degrees(ra) - camera_width_40 / 2.0, math.degrees(dec) - camera_height / 4.0),
width=camera_width_40,
height=camera_height / 2.0,
color='b',
lw=0.5, fill=False, alpha=0.3))
ax.add_artist(
Rectangle(xy=(math.degrees(ra) - camera_width_36 / 2.0, math.degrees(dec) - camera_height / 2.0),
height=camera_height / 4.0,
width=camera_width_36,
color='g',
lw=0.5, fill=False, alpha=0.3))
ax.add_artist(
Rectangle(xy=(math.degrees(ra) - camera_width_36 / 2.0, math.degrees(dec) + camera_height / 4.0),
height=camera_height / 4.0,
width=camera_width_36,
color='r',
lw=0.5, fill=False, alpha=0.3))
if LABEL_FIELDS:
ax.text(math.degrees(ra), math.degrees(dec),
name,
horizontalalignment='center',
verticalalignment='center',
zorder=10,
fontdict={'size': 4, 'color': 'darkblue'})
f.write("""]]</CSV></DATA>\n</TABLE>\n</ASTRO>\n""")
f.close()
if PLOT_USNO_STARS:
print("PLOTTING LOCATIONS NEARBY BRIGHT USNO B1 STARS")
for ra in range(int(ra_cen - width), int(ra_cen + width), 10):
for dec in range(int(dec_cen - height), int(dec_cen + height), 10):
file_name = USER + "/new_usno/usno{:5.2f}{:5.2f}.xml".format(ra, dec).replace(" ", "")
print(file_name)
file_name = file_name.replace(" ", "")
if not os.access(file_name, os.R_OK):
usno = usnoB1.TAPQuery(ra, dec, 10.0, 10.0)
fobj = open(file_name, 'w')
fobj.write(usno.read())
fobj.close()
try:
t = votable.parse(open(file_name, 'r')).get_first_table()
except:
print("No USNO stars found for: {} {}\n".format(ra, dec))
continue
select = t.array['Bmag'] < 9.3
Rmag = t.array['Bmag'][select]
min_mag = max(Rmag.min(), 11)
scale = 0.5 * 10 ** ((min_mag - Rmag) / 2.5)
ax.scatter(t.array['RAJ2000'][select], t.array['DEJ2000'][select], s=scale, marker='o', facecolor='y',
alpha=0.3, edgecolor='',
zorder=-10)
for planet in [ephem.Mars(), ephem.Jupiter(), ephem.Saturn(), ephem.Uranus(), ephem.Neptune()]:
planet.compute(plot_date)
ax.scatter(math.degrees(planet.ra), math.degrees(planet.dec),
marker='o',
s=40,
facecolor='r',
edgecolor='g', )
if LABEL_PLANETS:
ax.text(math.degrees(planet.ra), math.degrees(planet.dec),
planet.name,
horizontalalignment='center',
fontdict={'size': 6,
'color': 'darkred'})
if PLOT_MEGACAM_ARCHIVE_FIELDS:
print("PLOTTING FOOTPRINTS NEARBY ARCHIVAL MEGAPRIME IMAGES.")
for qra in range(int(xylims[1]), int(xylims[0]), 5):
print(qra)
for qdec in range(int(xylims[2]), int(xylims[3]), 10):
print(qra, qdec)
filename = USER + "/new_usno/megacam{:6.2f}{:6.2f}.xml".format(qra, qdec)
if not os.access(filename, os.R_OK):
# TAP query will max out silently if return table too large. Make small units.
data = megacam.TAPQuery(qra, qdec, 5.0, 10.0).read()
print(data)
# FIXME: add a catch for 503's when TAP fails when CADC drops connection
fobj = open(filename, 'w')
fobj.write(data)
fobj.close()
fobj = open(filename, 'r')
t = votable.parse(fobj).get_first_table()
ra = t.array['RAJ2000']
dec = t.array['DEJ2000']
rects = [Rectangle(xy=(ra[idx] - camera_width_36 / 2.0, dec[idx] - camera_width_36 / 2.0),
height=camera_height,
width=camera_width_36,
edgecolor='m',
alpha=0.1,
lw=0.1, zorder=-100,
fill=False) for idx in range(ra.size)]
for r in rects:
ax.add_artist(r)
if PLOT_MPCORB:
print("PLOTTING LOCATIONS OF KNOWN KBOs (using {})".format(MPCORB_FILE))
kbos = mpcread.getKBOs(MPCORB_FILE)
for kbo in kbos:
kbo.compute(plot_date)
if field_polygon.isInside(math.degrees(float(kbo.ra)), math.degrees(float(kbo.dec))):
ax.scatter(math.degrees(kbo.ra),
math.degrees(kbo.dec),
marker='x',
s=4,
facecolor='r',
edgecolor='r', alpha=0.3)
if PLOT_REAL_KBOS:
reader = mpc.MPCReader()
print("PLOTTING LOCATIONS OF OSSOS KBOs (using directory {})".format(REAL_KBO_AST_DIR))
for ast in os.listdir(REAL_KBO_AST_DIR):
if not ast.startswith('u'):
obs = reader.read(REAL_KBO_AST_DIR + ast)
try:
kbo = orbfit.Orbfit(obs)
except:
continue
kbo.predict(ephem.julian_date(ephem.date(plot_date)))
# if not field_polygon.isInside(float(kbo.coordinate.ra.degrees), float(kbo.coordinate.dec.degrees)):
# continue
if obs[0].mag < 23.6:
c = 'b'
if LABEL_FIELDS:
ax.text(kbo.coordinate.ra.degree,
kbo.coordinate.dec.degree,
ast.split('.')[0],
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size': 4,
'color': 'darkred'})
else:
c = 'g'
ax.text(kbo.coordinate.ra.degree,
kbo.coordinate.dec.degree,
ast.split('.')[0],
horizontalalignment='center',
verticalalignment='baseline',
fontdict={'size': 4,
'color': 'darkred'})
# from astropy import units
ax.add_artist(Ellipse((kbo.coordinate.ra.degree, kbo.coordinate.dec.degree,),
width=2.0 * kbo.dra / 3600.0,
height=2.0 * kbo.ddec / 3600.0,
angle=360 - (kbo.pa + 90), # kbo.pa.to(units.degree).value
edgecolor='none',
facecolor='k',
alpha=0.1))
ax.scatter(kbo.coordinate.ra.degree,
kbo.coordinate.dec.degree,
marker='o',
s=1,
facecolor='none',
edgecolor=c,
alpha=0.5)
new_tick_locations = np.array(list(range(360, 0, -30)))
def tick_function(X):
import calendar
month = (X / 30.0 + 8) % 12 + 1
return [calendar.month_abbr[int(z)] for z in month]
print(new_tick_locations)
if False:
ax.set_xlim(360, 0)
ax2 = ax.twiny()
ax2.set_xticks(new_tick_locations)
ax2.set_xticklabels(tick_function(new_tick_locations))
ax2.set_xlabel("Opp. Month")
ax2.set_xlim(360, 0)
print("SAVING FILE")
savefig('layout40-at' + PLOT_FIELD_EPOCH + '-discov_on-' + DISCOVERY_NEW_MOON + '.pdf')
sys.stderr.write("FINISHED\n")
| gpl-3.0 |
Akshay0724/scikit-learn | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display, first, what a K-means algorithm would yield
using three clusters. They then show the effect of a bad
initialization on the classification process:
by setting n_init to only 1 (the default is 10), the number of
times the algorithm is run with different centroid seeds is
reduced.
The next plot displays the result of using eight clusters, and
finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
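# Optional follow-up (sketch, not part of the original example): the agreement
# between the k=3 clustering and the true labels can be quantified with
#   from sklearn.metrics import adjusted_rand_score
#   adjusted_rand_score(iris.target, estimators['k_means_iris_3'].labels_)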
| bsd-3-clause |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/manifold/plot_manifold_sphere.py | 1 | 5115 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space, unlike other manifold-learning algorithms,
it does not seeks an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
that of representing a flat map of the Earth, as with
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <[email protected]>
# License: BSD 3 clause
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import NullFormatter
from mpl_toolkits.mplot3d import Axes3D
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold \
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2) \
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| mit |
brguez/TEIBA | src/python/germlineSrcElements_activityRate.py | 1 | 5501 | #!/usr/bin/env python
#coding: utf-8
#### FUNCTIONS ####
def header(string):
"""
Display header
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print '\n', timeInfo, "****", string, "****"
def subHeader(string):
"""
Display subheader
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, "**", string, "**"
def info(string):
"""
Display basic information
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, string
def binary(value):
"""
Convert value into binary
"""
# A) Value equal to 0
if (value == 0):
boolean = 0
# B) Value higher than 0
else:
boolean = 1
return boolean
def activityStatus(activityRate):
"""
Classify source element according its activity rate in one of these categories:
Activity ranges:
- none: 0
- low: (0-3]
- moderate: (3-9]
- high: >9
"""
# a) None: 0
if (activityRate == 0):
status = "none"
# b) Low: (0-3]
elif (activityRate > 0) and (activityRate <= 3):
status = "low"
# c) Moderate: (3-9]
elif (activityRate > 3) and (activityRate <= 9):
status = "moderate"
# d) Hot: >9
else:
status = "high"
return status
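# Worked example (illustrative numbers, not taken from the data): a source
# element with 12 transductions across 4 active donors has an activity rate of
# 12 / 4 = 3.0, which falls in the (0-3] range and is classified as "low".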
#### MAIN ####
## Import modules ##
import argparse
import sys
import os.path
import time
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy import stats
import seaborn as sns
import scipy
## Get user's input ##
parser = argparse.ArgumentParser(description= """""")
parser.add_argument('transductionsCountFile', help='')
parser.add_argument('metadata', help='')
parser.add_argument('-o', '--outDir', default=os.getcwd(), dest='outDir', help='output directory. Default: current working directory.' )
args = parser.parse_args()
transductionsCountFile = args.transductionsCountFile
metadata = args.metadata
outDir = args.outDir
scriptName = os.path.basename(sys.argv[0])
## Display configuration to standard output ##
print
print "***** ", scriptName, " configuration *****"
print "transductionsCountFile: ", transductionsCountFile
print "metadata: ", metadata
print "outDir: ", outDir
print
print "***** Executing ", scriptName, ".... *****"
print
sys.exit
## Start ##
## Make list with european donor ids
#####################################
metadata = open(metadata, 'r')
donorIdEurList = []
for line in metadata:
# Skip header
if not line.startswith("#"):
line = line.rstrip('\n')
line = line.split('\t')
donorId = line[0]
status = line[3]
ancestry = line[4]
if (status == "Whitelist") and (ancestry == "EUR"):
donorIdEurList.append(donorId)
print "donorIdEurList: ", len(donorIdEurList)
### Read transductions count dataframe
########################################
transductionsCountDf = pd.read_csv(transductionsCountFile, header=0, index_col=0, sep='\t')
print "transductionsCountDf: ", transductionsCountDf
### Compute source elements activity rate in the complete cohort
##################################################################
srcElementIds = transductionsCountDf.index
activityRateDf = pd.DataFrame(index=srcElementIds)
## Add the total number of transductions per source element
activityRateDf["totalNbTd"] = transductionsCountDf.sum(axis=1)
## Add the total number of active donors per source element
activeDonorBinaryDf = transductionsCountDf.applymap(binary)
activityRateDf["nbActiveDonors"] = activeDonorBinaryDf.sum(axis=1)
## Add the activity rate:
activityRateDf["activityRate"] = activityRateDf["totalNbTd"]/activityRateDf["nbActiveDonors"]
# For elements active in 0 donors, set the activity rate to 0
activityRateDf["activityRate"] = activityRateDf["activityRate"].fillna(0)
## Add the activity status:
activityRateDf['activityStatus'] = activityRateDf["activityRate"].apply(activityStatus)
## Add the max activity values
activityRateDf["maxActivity"] = transductionsCountDf.max(axis=1)
## Save dataframe into output file
outFilePath = outDir + '/germline_srcElements_activityRate.tsv'
activityRateDf.to_csv(outFilePath, sep='\t')
### Compute source elements activity rate in Europeans
########################################################
transductionsCountEurDf = transductionsCountDf[donorIdEurList]
activityRateEurDf = pd.DataFrame(index=srcElementIds)
## Add the total number of transductions per source element
activityRateEurDf["totalNbTd"] = transductionsCountEurDf.sum(axis=1)
## Add the total number of active donors per source element
activeDonorBinaryEurDf = transductionsCountEurDf.applymap(binary)
activityRateEurDf["nbActiveDonors"] = activeDonorBinaryEurDf.sum(axis=1)
## Add the activity rate:
activityRateEurDf["activityRate"] = activityRateEurDf["totalNbTd"]/activityRateEurDf["nbActiveDonors"]
# For elements active in 0 donors, set the activity rate to 0
activityRateEurDf["activityRate"] = activityRateEurDf["activityRate"].fillna(0)
## Add the activity status:
activityRateEurDf['activityStatus'] = activityRateEurDf["activityRate"].apply(activityStatus)
## Add the max activity values
activityRateEurDf["maxActivity"] = transductionsCountEurDf.max(axis=1)
## Save dataframe into output file
outFilePath = outDir + '/germline_srcElements_activityRateEUR.tsv'
activityRateEurDf.to_csv(outFilePath, sep='\t')
#### End
header("FINISH!!")
| gpl-3.0 |
sbonner0/DeepTopologyClassification | src/legacy/DataGeneration/GenFingerPrint.py | 1 | 1814 | from graph_tool.all import *
import os, csv, time, datetime, sys
import GFP
import pickle
import numpy as np
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
def loadDataAndGenPKL(inputdir, filename):
filehandler = open(filename, 'wb')
# Load the graph data. Need to think about the directory structure and balance of the datasets
for subdir, dirs, files in os.walk(inputdir):
for filename in files:
label = subdir.split("/")
label = label[len(label)-1]
g = Graph()
edges = []
filepath = subdir + os.sep + filename
date = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
print date + ": " + filepath
sys.stdout.flush()
with open(filepath) as networkData:
datareader = csv.reader(networkData, delimiter=" ")
for row in datareader:
if not row[0].startswith("#"):
edges.append([int(row[0]), int(row[1])])
networkData.close()
g.add_edge_list(edges, hashed=True) # Very important to hash the values here otherwise it creates too many nodes
g.set_directed(False)
# Pass the graph to the single fingerprint generation method and return the fingerprint vector
fp = GFP.GFPSingleFingerprint(g)
res = [label, filename, fp]
pickle.dump(res, filehandler)
filehandler.close()
return 0
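# Reading the pickle back: records are dumped one at a time above, so a reader
# must call pickle.load repeatedly until EOF. A minimal sketch (the helper name
# below is illustrative and not part of this module's original interface):
def loadFingerprintsFromPKL(filename):
    records = []
    with open(filename, 'rb') as fh:
        while True:
            try:
                # each record is [label, filename, fingerprint]
                records.append(pickle.load(fh))
            except EOFError:
                break
    return records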
def usage():
print """Usage:\n%s </path/to/input> <pickle filename>\nNB: Piclke created @ launch location unless absloute path used""" % (sys.argv[0])
if __name__ == "__main__":
if len (sys.argv) != 3 :
usage()
sys.exit (1)
loadDataAndGenPKL(sys.argv[1],sys.argv[2])
| gpl-3.0 |
Sklearn-HMM/scikit-learn-HMM | sklean-hmm/tree/tests/test_export.py | 37 | 2897 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
def test_graphviz_toy():
"""Check correctness of export_graphviz"""
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = "digraph Tree {\n" \
"0 [label=\"X[0] <= 0.0000\\ngini = 0.5\\n" \
"samples = 6\", shape=\"box\"] ;\n" \
"1 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 3. 0.]\", shape=\"box\"] ;\n" \
"0 -> 1 ;\n" \
"2 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 0. 3.]\", shape=\"box\"] ;\n" \
"0 -> 2 ;\n" \
"}"
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = "digraph Tree {\n" \
"0 [label=\"feature0 <= 0.0000\\ngini = 0.5\\n" \
"samples = 6\", shape=\"box\"] ;\n" \
"1 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 3. 0.]\", shape=\"box\"] ;\n" \
"0 -> 1 ;\n" \
"2 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 0. 3.]\", shape=\"box\"] ;\n" \
"0 -> 2 ;\n" \
"}"
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0)
contents1 = out.getvalue()
contents2 = "digraph Tree {\n" \
"0 [label=\"X[0] <= 0.0000\\ngini = 0.5\\n" \
"samples = 6\", shape=\"box\"] ;\n" \
"1 [label=\"(...)\", shape=\"box\"] ;\n" \
"0 -> 1 ;\n" \
"2 [label=\"(...)\", shape=\"box\"] ;\n" \
"0 -> 2 ;\n" \
"}"
assert_equal(contents1, contents2)
def test_graphviz_errors():
"""Check for errors of export_graphviz"""
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause |
COSMOGRAIL/COSMOULINE | pipe/9_quicklook_lc_scripts/7_by_ellipticity_NU.py | 1 | 2966 | """
We color points according to ellipticity.
"""
execfile("../config.py")
from kirbybase import KirbyBase, KBError
import variousfct
import headerstuff
import star
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates
print "You want to analyze the deconvolution %s" %deckey
print "Deconvolved object : %s" % decobjname
if plotnormfieldname == None:
print "I will use the normalization coeffs used for the deconvolution."
else:
print "Using %s for the normalization." % (plotnormfieldname)
deckeynormused = plotnormfieldname
ptsources = star.readmancat(ptsrccat)
print "Number of point sources : %i" % len(ptsources)
print "Names of sources : %s" % ", ".join([s.name for s in ptsources])
db = KirbyBase()
images = db.select(imgdb, [deckeyfilenum], ['\d\d*'], returnType='dict', useRegExp=True, sortFields=['mjd'])
print "%i images" % len(images)
fieldnames = db.getFieldNames(imgdb)
plt.figure(figsize=(15,15))
mhjds = np.array([image["mhjd"] for image in images])
ells = np.array([image["ell"] for image in images])
for s in ptsources:
fluxfieldname = "out_%s_%s_flux" % (deckey, s.name)
mags = -2.5*np.log10(np.array([image[fluxfieldname]*image[deckeynormused] for image in images]))
plt.scatter(mhjds, mags, s=12, c=ells, vmin=0.01,vmax=0.3, edgecolors='none')
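# Note: -2.5 * log10(flux) above is the standard (Pogson) definition of an
# instrumental magnitude, so a factor of 100 in flux corresponds to exactly
# 5 magnitudes; the normalization coefficient rescales the flux before the
# conversion.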
plt.grid(True)
# reverse y axis for magnitudes :
ax=plt.gca()
ax.set_ylim(ax.get_ylim()[::-1])
ax.set_xlim(np.min(mhjds), np.max(mhjds)) # DO NOT REMOVE THIS !!!
# IT IS IMPORTANT TO GET THE DATES RIGHT
#plt.title(deckey, fontsize=20)
plt.xlabel('MHJD [days]')
plt.ylabel('Magnitude (instrumental)')
titletext1 = "%s (%i points)" % (xephemlens.split(",")[0], len(images))
titletext2 = deckey
ax.text(0.02, 0.97, titletext1, verticalalignment='top', horizontalalignment='left', transform=ax.transAxes)
ax.text(0.02, 0.94, titletext2, verticalalignment='top', horizontalalignment='left', transform=ax.transAxes)
if plotnormfieldname:
titletext3 = "Renormalized with %s" % (plotnormfieldname)
ax.text(0.02, 0.91, titletext3, verticalalignment='top', horizontalalignment='left', transform=ax.transAxes)
cbar = plt.colorbar(orientation='horizontal')
cbar.set_label('Ellipticity')
# Second x-axis with actual dates :
yearx = ax.twiny()
yearxmin = headerstuff.DateFromJulianDay(np.min(mhjds) + 2400000.5)
yearxmax = headerstuff.DateFromJulianDay(np.max(mhjds) + 2400000.5)
yearx.set_xlim(yearxmin, yearxmax)
yearx.xaxis.set_minor_locator(matplotlib.dates.MonthLocator())
yearx.xaxis.set_major_locator(matplotlib.dates.YearLocator())
yearx.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%Y'))
yearx.xaxis.tick_top()
yearx.set_xlabel("Date")
if savefigs:
if plotnormfieldname:
plotfilepath = os.path.join(plotdir, "%s_lc_%s_by_ellipticity.pdf" % (deckey, plotnormfieldname))
else :
plotfilepath = os.path.join(plotdir, "%s_lc_by_ellipticity.pdf" % (deckey))
plt.savefig(plotfilepath)
print "Wrote %s" % (plotfilepath)
else:
plt.show()
| gpl-3.0 |
chemreac/chemreac | setup.py | 2 | 8157 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import pprint
import re
import shutil
import subprocess
import sys
import warnings
from setuptools import setup, Extension
try:
import cython
except ImportError:
_HAVE_CYTHON = False
else:
_HAVE_CYTHON = True
assert cython # silence pep8
pkg_name = 'chemreac'
url = 'https://github.com/chemreac/' + pkg_name
license = 'BSD'
def _path_under_setup(*args):
return os.path.join('.', *args)
release_py_path = _path_under_setup(pkg_name, '_release.py')
config_py_path = _path_under_setup(pkg_name, '_config.py')
env = None # silence pyflakes, 'env' is actually set on the next line
exec(open(config_py_path).read())
for k, v in list(env.items()):
env[k] = os.environ.get('%s_%s' % (pkg_name.upper(), k), v)
_version_env_var = '%s_RELEASE_VERSION' % pkg_name.upper()
RELEASE_VERSION = os.environ.get(_version_env_var, '')
if len(RELEASE_VERSION) > 1:
if RELEASE_VERSION[0] != 'v':
raise ValueError("$%s does not start with 'v'" % _version_env_var)
TAGGED_RELEASE = True
__version__ = RELEASE_VERSION[1:]
else: # set `__version__` from _release.py:
TAGGED_RELEASE = False
exec(open(release_py_path).read())
if __version__.endswith('git'):
try:
_git_version = subprocess.check_output(
['git', 'describe', '--dirty']).rstrip().decode('utf-8')
except subprocess.CalledProcessError:
warnings.warn("A git-archive is being installed - version information incomplete.")
else:
if 'develop' not in sys.argv:
warnings.warn("Using git to derive version: dev-branches may compete.")
_ver_tmplt = r'\1.post\2' if os.environ.get('CONDA_BUILD', '0') == '1' else r'\1.post\2+\3'
__version__ = re.sub(r'v([0-9.]+)-(\d+)-(\S+)', _ver_tmplt, _git_version) # .dev < '' < .post
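        # Illustrative example of the substitution above (hypothetical tag):
        # 'v0.7.2-14-gabc1234' becomes '0.7.2.post14+gabc1234', or just
        # '0.7.2.post14' when CONDA_BUILD=1 (the local version part is dropped).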
_WITH_DEBUG = env['WITH_DEBUG'] == '1'
_WITH_OPENMP = env['WITH_OPENMP'] == '1'
_WITH_DATA_DUMPING = env['WITH_DATA_DUMPING'] == '1'
# Source distributions contain rendered sources
_common_requires = ['numpy>=1.16.6', 'block_diag_ilu>=0.5.1', 'pycvodes>=0.13.1', 'finitediff>=0.6.3']
install_requires = _common_requires + ['chempy>=0.7.11', 'quantities>=0.12.1']
package_include = os.path.join(pkg_name, 'include')
_src = {ext: _path_under_setup(pkg_name, '_%s.%s' % (pkg_name, ext)) for ext in "cpp pyx".split()}
if _HAVE_CYTHON and os.path.exists(_src["pyx"]):
# Possible that a new release of Python needs a re-rendered Cython source,
# or that we want to include possible bug-fix to Cython, disable by manually
# deleting .pyx file from source distribution.
USE_CYTHON = True
if os.path.exists(_src['cpp']):
os.unlink(_src['cpp']) # ensure c++ source is re-generated.
else:
USE_CYTHON = False
ext_modules = []
if len(sys.argv) > 1 and '--help' not in sys.argv[1:] and sys.argv[1] not in (
'--help-commands', 'egg_info', 'clean', '--version'):
import numpy as np
import finitediff as fd
import pycvodes as pc
from pycvodes._libs import get_libs as pc_get_libs
import block_diag_ilu as bdi
setup_requires = _common_requires + ['mako>=1.0'] + (["cython>=0.29.15"] if USE_CYTHON else [])
rendered_path = 'src/chemreac.cpp'
template_path = rendered_path + '.mako'
if os.path.exists(template_path):
from mako.template import Template
from mako.exceptions import text_error_template
subsd = {'WITH_OPENMP': _WITH_OPENMP}
try:
rendered = Template(open(template_path, 'rt').read()).render(**subsd)
except:
sys.stderr.write(text_error_template().render_unicode())
raise
else:
open(rendered_path, 'wt').write(rendered)
sources = [_src["pyx" if USE_CYTHON else "cpp"]]
ext_modules.append(Extension('{0}._{0}'.format(pkg_name), sources))
if USE_CYTHON:
from Cython.Build import cythonize
ext_modules = cythonize(ext_modules, include_path=[
package_include,
pc.get_include(),
os.path.join('external', 'anyode', 'cython_def')
])
if not os.path.exists(os.path.join('external', 'anyode', 'cython_def', 'anyode.pxd')):
raise FileNotFoundError("No anyode.pxd?")
ext_modules[0].include_dirs += [
np.get_include(), fd.get_include(), bdi.get_include(),
pc.get_include(), package_include, os.path.join('external', 'anyode', 'include')
]
ext_modules[0].sources = [rendered_path] + ext_modules[0].sources
ext_modules[0].language = 'c++'
ext_modules[0].extra_compile_args = ['-std=c++11'] + (['-fopenmp'] if _WITH_OPENMP else [])
ext_modules[0].define_macros += (
([('CHEMREAC_WITH_DEBUG', None)] if _WITH_DEBUG else []) +
([('CHEMREAC_WITH_DATA_DUMPING', None)] if _WITH_DATA_DUMPING else []) +
([('BLOCK_DIAG_ILU_WITH_OPENMP', None)] if os.environ.get('BLOCK_DIAG_ILU_WITH_OPENMP', '') == '1' else [])
)
ext_modules[0].libraries += [l for l in (pc_get_libs().split(',') + os.environ.get(
'CHEMREAC_LAPACK', "lapack,blas").split(",")) if l != ""] + ['m']
else:
setup_requires = []
modules = [
pkg_name+'.util',
]
tests = [
pkg_name+'.tests',
pkg_name+'.util.tests',
]
classifiers = [
"Development Status :: 3 - Alpha",
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
]
with io.open(_path_under_setup(pkg_name, '__init__.py'), 'rt', encoding='utf-8') as f:
short_description = f.read().split('"""')[1].split('\n')[1]
if not 10 < len(short_description) < 255:
warnings.warn("Short description from __init__.py proably not read correctly.")
long_description = io.open(_path_under_setup('README.rst'),
encoding='utf-8').read()
if not len(long_description) > 100:
warnings.warn("Long description from README.rst probably not read correctly.")
_author, _author_email = io.open(_path_under_setup('AUTHORS'), 'rt', encoding='utf-8').readline().split('<')
setup_kwargs = dict(
name=pkg_name,
version=__version__,
description=short_description,
long_description=long_description,
author=_author.strip(),
author_email=_author_email.split('>')[0].strip(),
url=url,
license=license,
keywords=["chemical kinetics", "Smoluchowski equation",
"advection-diffusion-reaction"],
packages=[pkg_name] + modules + tests,
include_package_data=True,
package_data={
'chemreac.tests': ['*.json', '*.txt']
},
ext_modules=ext_modules,
classifiers=classifiers,
setup_requires=setup_requires,
install_requires=install_requires,
extras_require={'all': [
'argh', 'pytest', 'scipy>=0.19.1', 'matplotlib', 'mpld3',
'sym>=0.3.4', 'sympy>=1.1.1,!=1.2', 'pyodeint>=0.10.4', 'pygslodeiv2>=0.9.1', 'batemaneq>=0.2.2',
'sphinx', 'sphinx_rtd_theme', 'numpydoc', 'pyodesys>=0.13.1'
]},
python_requires='>=3.6',
)
if __name__ == '__main__':
try:
if TAGGED_RELEASE:
# Same commit should generate different sdist files
# depending on tagged version (see RELEASE_VERSION)
# this will ensure source distributions contain the correct version
shutil.move(release_py_path, release_py_path+'__temp__')
open(release_py_path, 'wt').write(
"__version__ = '{}'\n".format(__version__))
shutil.move(config_py_path, config_py_path+'__temp__')
with open(config_py_path, 'wt') as fh:
fh.write("env = {}\n".format(pprint.pformat(env)))
setup(**setup_kwargs)
finally:
if TAGGED_RELEASE:
shutil.move(release_py_path+'__temp__',
release_py_path)
shutil.move(config_py_path+'__temp__',
config_py_path)
| bsd-2-clause |
fengzhyuan/scikit-learn | sklearn/linear_model/randomized_l1.py | 33 | 23358 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
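# The array returned above holds, for each feature, the fraction of the
# n_resampling randomized fits in which that feature was selected (a value in
# [0, 1]); the estimators below expose it as ``scores_`` / ``all_scores_``.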
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy by Meinshausen and Buhlman:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two function below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fit the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha parameter in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article which is scaling.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold: float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Regression works by resampling the train data and computing
a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
    # Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
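# A minimal usage sketch (illustrative only; the synthetic data and the helper
# name below are not part of the library itself):
def _stability_selection_demo(random_state=0):
    """Sketch: fit RandomizedLasso on toy data and inspect stability scores."""
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=200, n_features=30, n_informative=5,
                           random_state=random_state)
    clf = RandomizedLasso(alpha='aic', random_state=random_state)
    clf.fit(X, y)
    # scores_[i] is the fraction of resampled models in which feature i kept a
    # non-zero coefficient; get_support() thresholds it at selection_threshold.
    return clf.scores_, clf.get_support(indices=True)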
| bsd-3-clause |
eteq/bokeh | examples/plotting/file/unemployment.py | 46 | 1846 | from collections import OrderedDict
import numpy as np
from bokeh.plotting import ColumnDataSource, figure, show, output_file
from bokeh.models import HoverTool
from bokeh.sampledata.unemployment1948 import data
# Read in the data with pandas. Convert the year column to string
data['Year'] = [str(x) for x in data['Year']]
years = list(data['Year'])
months = ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]
data = data.set_index('Year')
# this is the colormap from the original plot
colors = [
"#75968f", "#a5bab7", "#c9d9d3", "#e2e2e2", "#dfccce",
"#ddb7b1", "#cc7878", "#933b41", "#550b1d"
]
# Set up the data for plotting. We will need to have values for every
# pair of year/month names. Map the rate to a color.
month = []
year = []
color = []
rate = []
for y in years:
for m in months:
month.append(m)
year.append(y)
monthly_rate = data[m][y]
rate.append(monthly_rate)
color.append(colors[min(int(monthly_rate)-2, 8)])
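# The expression above bins the integer part of the rate into a palette index:
# rates in [2%, 3%) take the lightest color and anything at or above 10%
# saturates at the darkest color (index 8).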
source = ColumnDataSource(
data=dict(month=month, year=year, color=color, rate=rate)
)
output_file('unemployment.html')
TOOLS = "resize,hover,save,pan,box_zoom,wheel_zoom"
p = figure(title="US Unemployment (1948 - 2013)",
x_range=years, y_range=list(reversed(months)),
x_axis_location="above", plot_width=900, plot_height=400,
toolbar_location="left", tools=TOOLS)
p.rect("year", "month", 1, 1, source=source,
color="color", line_color=None)
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = "5pt"
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = np.pi/3
hover = p.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
('date', '@month @year'),
('rate', '@rate'),
])
show(p) # show the plot
| bsd-3-clause |
tcchenbtx/project-zeta-J | code/linear_model_scripts_sub5.py | 3 | 25730 | # Goal for this scripts:
#
# Perform linear regression and analyze the similarity in terms of the activated brain area when recognizing different
# objects in odd and even runs of subject 1
# Load required function and modules:
from __future__ import print_function, division
import numpy as np
import numpy.linalg as npl
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib import gridspec
import os
import re
import json
import nibabel as nib
from utils import subject_class as sc
from utils import outlier
from utils import diagnostics as diagnos
from utils import get_object_neural as neural
from utils import stimuli
from utils import convolution as convol
from utils import smooth as sm
from utils import linear_model as lm
from utils import maskfunc as msk
from utils import affine
import copy
# important path:
base_path = os.path.abspath(os.path.dirname(__file__))
base_path = os.path.join(base_path, "..")
# where to store figures
figure_path = os.path.join(base_path, "code", "images", "")
# where to store txt files
file_path = os.path.join(base_path, "code", "txt", "")
# help to make directory to save figures and txt files
# if figure folder doesn't exist -> make it
if not os.path.exists(figure_path):
os.makedirs(figure_path)
# if txt folder doesn't exist -> make it
if not os.path.exists(file_path):
os.makedirs(file_path)
# color display:
# list of all objects in this study in alphabetical order
object_list = ["bottle", "cat", "chair", "face", "house", "scissors", "scrambledpix", "shoe"]
# assign color for task time course for each object
color_list_s = ["b", "g", "r", "c", "m", "y", "k", "sienna"]
match_color_s = dict(zip(object_list, color_list_s))
# assign color for convolved result for each object
color_list_c = ["royalblue", "darksage", "tomato", "cadetblue", "orchid", "goldenrod", "dimgrey", "sandybrown"]
match_color_c = dict(zip(object_list, color_list_c))
# color for showing beta values
nice_cmap_values = np.loadtxt(file_path + 'actc.txt')
nice_cmap = colors.ListedColormap(nice_cmap_values, 'actc')
# assign object parameter number: each object gets an index number
match_para = dict(zip(object_list, range(8)))
# check slice number:
# this is the specific slice we use to run 2D correlation
slice_number = 35
# separator for better report display
sec_separator = '#' * 80
separator = "-" * 80
# which subject to work on?
subid = "sub005"
# work on results from this subject:
################################### START #####################################
print (sec_separator)
print ("Project-Zeta: use linear regression to study ds105 dataset")
print (separator)
print ("Focus on %s for the analysis" % subid)
print (sec_separator)
print ("Progress: Clean up data")
print (separator)
# load important data for this subject by using subject_class
sub = sc.subject(subid)
# get image files of this subject:
sub_img = sub.run_img_result
# get run numbers of this subject:
run_num = len(sub.run_keys)
# report keys of all images:
print ("Import %s images" % subid)
print (separator)
print ("These images are imported:")
img_key = sub_img.keys()
img_key = sorted(img_key)
for i in img_key:
print (i)
# report how many runs in this subject
print ("There are %d runs for %s" % (run_num, subid))
print (separator)
# get data for those figures
print ("Get data from images...")
sub_data = {}
for key, img in sub_img.items():
sub_data[key] = img.get_data()
print ("Complete!")
print (separator)
# use rms_diff to check outlier for all runs of this subject
print ("Analyze outliers in these runs:")
for key, data in sub_data.items():
rms_diff = diagnos.vol_rms_diff(data)
# get outlier indices and the threshold for the outlier
rms_outlier_indices, rms_thresh = diagnos.iqr_outliers(rms_diff)
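    # diagnos.iqr_outliers presumably applies the usual interquartile-range rule
    # (assumption: thresholds at Q1 - 1.5*IQR and Q3 + 1.5*IQR), returning the
    # indices of volumes whose RMS difference falls outside that band together
    # with the (low, high) threshold pair used for plotting below.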
y_value2 = [rms_diff[i] for i in rms_outlier_indices]
# create figures to show the outlier
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.55, 0.75])
ax.plot(rms_diff, label='rms')
ax.plot([0, len(rms_diff)], [rms_thresh[1], rms_thresh[1]], "k--",\
label='high threshold', color='m')
ax.plot([0, len(rms_diff)], [rms_thresh[0], rms_thresh[0]], "k--",\
label='low threshold', color='c')
ax.plot(rms_outlier_indices, y_value2, 'o', color='g', label='outlier')
# label the figure
ax.set_xlabel('Scan time course')
    ax.set_ylabel('Volume RMS difference')
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., numpoints=1)
fig.text(0.05, 0.9, 'Volume RMS Difference with Outliers for %s' % key, weight='bold')
# save figure
fig.savefig(figure_path + 'Volume_RMS_Difference_Outliers_%s.png' % key)
# clear figure
fig.clf()
# close pyplot window
plt.close()
# report
print ("Outlier analysis results are saved as figures!")
print (separator)
# remove outlier from images
sub_clean_img, outlier_index = outlier.remove_data_outlier(sub_img)
print ("Remove outlier:")
print ("outliers are removed from each run!")
print (sec_separator)
# run generate predicted bold signals:
print ("Progress: create predicted BOLD signals based on condition files")
# get general all tr times == 121*2.5 = about 300 s
# this is the x-axis to plot hemodynamic prediction
all_tr_times = np.arange(sub.BOLD_shape[-1]) * sub.TR
# the y-axis to plot hemodynamic prediction is the neural value from condition (on-off)
sub_neural = neural.get_object_neural(sub.sub_id, sub.conditions, sub.TR, sub.BOLD_shape[-1])
# report info for all run details
print ("The detailed run info for %s:" % subid)
neural_key = sub_neural.keys()
neural_key = sorted(neural_key)
for i in neural_key:
print (i)
print (separator)
# get task time course for all runs -> save as images
print ("generate task time course images")
print (separator)
for run in range(1, run_num):
# make plots to display task time course
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.55, 0.75])
for item in object_list:
check_key = "run0%02d-%s" % (run, item)
ax.plot(all_tr_times, sub_neural[check_key][0], label="%s" % item, c=match_color_s[item])
# make labels:
ax.set_title("Task time course for %s-run0%02d" % (subid, run), weight='bold')
ax.set_xlabel("Time course (second)")
ax.set_ylabel("Task (Off = 0, On = 1)")
ax.set_yticks([0, 1])
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., numpoints=1)
# save figure
fig.savefig(figure_path + "Task_time_course_%s_run0%02d" % (subid, run))
# clear figure
fig.clf()
# close pyplot window
plt.close()
# report
print ("task time course images are saved!")
print (separator)
# assume true HRF starts at zero, and gets to zero sometime before 35 seconds.
tr_times = np.arange(0, 30, sub.TR)
hrf_at_trs = convol.hrf(tr_times)
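# convol.hrf is a project utility whose implementation is not shown here; a
# common choice (assumed for illustration only) is the canonical double-gamma
# HRF, i.e. the difference of two gamma densities. A minimal sketch:
def example_double_gamma_hrf(times):
    """Illustrative double-gamma HRF; not necessarily identical to convol.hrf."""
    from scipy.stats import gamma
    peak = gamma.pdf(times, 6)            # positive lobe peaking near 6 s
    undershoot = gamma.pdf(times, 12)     # undershoot peaking near 12 s
    values = peak - 0.35 * undershoot
    return values / np.max(values) * 0.6  # scale the peak to 0.6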
# get convolution data for each object in this run -> show figure for run001
print ("Work on convolution based on condition files:")
print (separator)
sub_convolved = convol.get_all_convolved(sub_neural, hrf_at_trs, file_path)
print ("convolution analysis for all runs is complete")
print (separator)
# save convolved data
for key, data in sub_convolved.items():
np.savetxt(file_path + "convolved_%s.txt" % key, data)
print ("convolved results are saved as txt files")
# show relationship between task time course and bold signals
print ("Show relationship between task time course and predicted BOLD signals")
# get keys for each neural conditions
sub_neural_key = sub_neural.keys()
# sort key for better display
sub_neural_key = sorted(sub_neural_key)
# create figures
fig = plt.figure()
for run in range(1, run_num):
ax = plt.subplot(111)
ax2 = ax.twinx()
figures = {}
count = 0
for item in object_list:
# focus on one at a time:
check_key = "run0%02d-%s" % (run, item)
# perform convolution to generate estimated BOLD signals
convolved = convol.convolution(sub_neural[check_key][0], hrf_at_trs)
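        # convol.convolution presumably wraps np.convolve(neural, hrf_at_trs)
        # and truncates the tail so the result keeps one value per TR (an
        # assumption about the utility, needed for the time axes below to align)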
# plot the task time course and the estimated BOLD signals in same plot
# plot the estimated BOLD signals
# plot the task time course
figures["fig" + "%s" % str(count)] = ax.plot(all_tr_times, sub_neural[check_key][0], c=match_color_s[item], label="%s-task" % item)
count += 1
# plot estimated BOLD signal
figures["fig" + "%s" % str(count)] = ax2.plot(all_tr_times, convolved, c=match_color_c[item], label="%s-BOLD" % item)
count += 1
# label this plot
plt.subplots_adjust(left=0.1, right=0.6, bottom=0.1, top=0.85)
plt.text(0.25, 1.05, "Hemodynamic prediction of %s-run0%02d" % (subid, run), weight='bold')
ax.set_xlabel("Time course (second)")
ax.set_ylabel("Task (Off = 0, On = 1)")
ax.set_yticks([-0.2, 0, 0.2, 0.4, 0.6, 0.8, 1.0])
ax2.set_ylabel("Estimated BOLD signal")
ax2.set_yticks([-0.2, 0, 0.2, 0.4, 0.6, 0.8, 1.0])
# label legend
total_figures = figures["fig0"]
for i in range(1, len(figures)):
total_figures += figures["fig" + "%s" % str(i)]
labs = [fig.get_label() for fig in total_figures]
ax.legend(total_figures, labs, bbox_to_anchor=(1.2, 1.0), loc=0, borderaxespad=0., fontsize=11)
# save plot
plt.savefig(figure_path + "%s_run0%02d_bold_prediction.png" % (subid, run))
# clear plot
plt.clf()
# close pyplot window
plt.close()
print (sec_separator)
# remove outlier from convolved results
print("Progress: clean up convolved results")
sub_convolved = convol.remove_outlier(sub.sub_id, sub_convolved, outlier_index)
print ("Outliers are removed from convolved results")
print (sec_separator)
# smooth the images:
print ("Progress: Smooth images")
# subject clean and smooth img == sub_cs_img
sub_cs_img = sm.smooth(sub_clean_img)
print ("Smooth images: Complete!")
print (sec_separator)
# get shape info of the images
print ("Progress: record shape information")
shape = {}
for key, img in sub_cs_img.items():
shape[key] = img.shape
print ("shape of %s = %s" % (key, shape[key]))
with open(file_path+'new_shape.json', 'a') as fp:
json.dump(shape, fp)
print ("New shape info of images is recorded and saved as file")
print (sec_separator)
############################## Linear regression ##############################
print ("Let's run Linear regression")
print (separator)
# generate design matrix
print ("Progress: generate design matrix")
# generate design matrix for each runs
design_matrix = lm.batch_make_design(sub_cs_img, sub_convolved)
# check parameter numbers
parameters = design_matrix["%s_run001" % subid].shape[-1]
print ("parameter number: %d" % parameters)
print ("Design matrix generated")
print (separator)
# rescale design matrix
print ("Progress: rescale design matrix")
design_matrix = lm.batch_scale_matrix(design_matrix)
print ("Rescale design matrix: complete!")
# save scaled design matrix as figure
# plot scaled design matrix
fig = plt.figure(figsize=(8.0, 8.0))
for key, matrix in design_matrix.items():
ax = plt.subplot(111)
ax.imshow(matrix, aspect=0.1, interpolation="nearest", cmap="gray")
# label this plot
fig.text(0.15, 0.95, "scaled design matrix for %s" % key, weight='bold', fontsize=18)
ax.set_xlabel("Parameters", fontsize=16)
ax.set_xticklabels([])
ax.set_ylabel("Scan time course", fontsize=16)
# save plot
plt.savefig(figure_path + "design_matrix_%s" % key)
# clean plot
plt.clf()
# close pyplot window
plt.close()
print ("Design matrices are saved as figures")
print (separator)
# use maskfunc to generate mask
print ("Progress: generate mask for brain images")
mask, mean_data = msk.generateMaskedBrain(sub_clean_img)
print ("Generate mask for brain images: complete!")
# save mask as figure
for key, each in mask.items():
for i in range(1, 90):
plt.subplot(9, 10, i)
plt.imshow(each[:, :, i], interpolation="nearest", cmap="gray", alpha=0.5)
# label plot
ax = plt.gca()
ax.set_xticklabels([])
ax.set_yticklabels([])
# save plot
plt.savefig(figure_path + "all_masks_for_%s.png" % key)
# clear plot
plt.clf()
# close pyplot window
plt.close()
print (separator)
# run linear regression to generate betas
# first step: use mask to get data and reshape to 2D
print ("Progress: Use mask to subset brain images")
sub_cs_mask_img = lm.apply_mask(sub_cs_img, mask)
sub_cs_mask_img_2d = lm.batch_convert_2d_based(sub_cs_mask_img, shape)
# sub1_cs_mask_img_2d = lm.batch_convert_2d(sub1_cs_mask_img)
print ("Use mask to subset brain images: complete!")
print (separator)
# second step: run linear regression to get betas:
print ("Progress: Run linear regression to get beta hat")
all_betas = {}
for key, img in sub_cs_mask_img_2d.items():
#img_2d = np.reshape(img, (-1, img.shape[-1]))
Y = img.T
all_betas[key] = npl.pinv(design_matrix[key]).dot(Y)
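    # This is the ordinary least-squares estimate: pinv(X).dot(Y) equals
    # (X.T X)^(-1) X.T Y for a full-rank design matrix X, so all_betas[key]
    # has shape (n_parameters, n_voxels_in_mask)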
print ("Getting betas from linear regression: complete!")
print (separator)
# third step: put betas back into it's original place:
print ("Save beta figures:")
beta_vols = {}
raw_beta_vols = {}
for key, betas in all_betas.items():
# create 3D zeros to hold betas
beta_vols[key] = np.zeros(shape[key][:-1] + (parameters,))
# get the mask info
check_mask = (mask[key] == 1)
# fit betas back to 3D
beta_vols[key][check_mask] = betas.T
print ("betas of %s is fitted back to 3D!" % key)
# save 3D betas in dictionary
raw_beta_vols[key] = beta_vols[key]
# get min and max of figure
vmin = beta_vols[key][:, :, 21:70].min()
vmax = beta_vols[key][:, :, 21:70].max()
# clear the background
beta_vols[key][~check_mask] = np.nan
mean_data[key][~check_mask] = np.nan
# plot betas
fig = plt.figure(figsize=(8.0, 8.0))
for item in object_list:
# plot 50 pictures
fig, axes = plt.subplots(nrows=5, ncols=10)
lookat = 20
for ax in axes.flat:
# show plot from z= 21~70
ax.imshow(mean_data[key][:, :, lookat], interpolation="nearest", cmap="gray", alpha=0.5)
im = ax.imshow(beta_vols[key][:, :, lookat, match_para[item]], cmap=nice_cmap, alpha=0.5)
# label the plot
ax.set_xticks([])
ax.set_yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
lookat += 1
# label the plot
fig.subplots_adjust(bottom=0.2, hspace=0)
fig.text(0.28, 0.9, "Brain area responding to %s in %s" % (item, subid), weight='bold')
# color bar
cbar_ax = fig.add_axes([0.15, 0.08, 0.7, 0.04])
fig.colorbar(im, cax=cbar_ax, ticks=[], orientation='horizontal')
fig.text(0.35, 0.15, "Relative responding intensity")
fig.text(0.095, 0.09, "Low")
fig.text(0.87, 0.09, "High")
# save plot
plt.savefig(figure_path + "betas_for_%s_%s.png" % (key, item))
# clear plot
plt.clf()
# close pyplot window
plt.close()
# report
print ("beta figures are generated!!")
print (sec_separator)
# analyze based on odd runs even runs using affine
print ("Progress: Use affine matrix to check brain position")
print ("print affine matrix for each images:")
affine_matrix = {}
for key, img in sub_img.items():
affine_matrix[key] = img.affine
print("%s :\n %s" % (key, img.affine))
# check if they all have same affine matrix
same_affine = True
check_matrix = affine_matrix["%s_run001" % subid]
for key, aff in affine_matrix.items():
    # aff.all() would collapse each matrix to a single bool, so compare element-wise
    if not np.array_equal(aff, check_matrix):
same_affine = False
if same_affine:
print ("They have the same affine matrix!")
else:
print ("They don't have same affine matrix -> be careful about the brain position")
print (sec_separator)
############################## 2D correlation #################################
# Focus on 2D slice to run the analysis:
print ("Progress: Try 2D correlation")
print ("Focus on one slice: k = %d" % slice_number)
print (separator)
print ("Run correlation between run1_house, run2_house and run2_face")
# get 2D slice for run1 house
run1_house = raw_beta_vols["%s_run001" % subid][:, 25:50, slice_number, 5]
# save as plot
plt.imshow(run1_house, interpolation="nearest", cmap=nice_cmap, alpha=0.5)
plt.title("%s_Run1_House" % subid)
plt.savefig(figure_path + "%s_run1_house.png" % subid)
plt.clf()
# get 2D slice of run2 house
run2_house = raw_beta_vols["%s_run002" % subid][:, 25:50, slice_number, 5]
# save as plot
plt.imshow(run2_house, interpolation="nearest", cmap=nice_cmap, alpha=0.5)
plt.title("%s_Run2_House" % subid)
plt.savefig(figure_path + "%s_run2_house.png" % subid)
plt.clf()
# get 2D slice for run2 face
run2_face = raw_beta_vols["%s_run002" % subid][:, 25:50, slice_number, 4]
# save as plot
plt.imshow(run2_face, interpolation="nearest", cmap=nice_cmap, alpha=0.5)
plt.title("%s_Run2_Face" % subid)
plt.savefig(figure_path + "%s_run2_face.png" % subid)
plt.close()
# put those 2D plots together
fig = plt.figure()
plt.subplot(1, 3, 1, xticks=[], yticks=[], xticklabels=[], yticklabels=[])
plt.imshow(run1_house, interpolation="nearest", cmap=nice_cmap, alpha=0.5)
plt.title("Sub%s_Run1_House" % subid[-1], weight='bold', fontsize=10)
plt.subplot(1, 3, 2, xticks=[], yticks=[])
plt.imshow(run2_house, interpolation="nearest", cmap=nice_cmap, alpha=0.5)
plt.title("Sub%s_Run2_House" % subid[-1], weight='bold', fontsize=10)
plt.subplot(1, 3, 3, xticks=[], yticks=[])
plt.imshow(run2_face, interpolation="nearest", cmap=nice_cmap, alpha=0.5)
plt.title("Sub%s_Run2_Face" % subid[-1], weight='bold', fontsize=10)
# label plot
fig.subplots_adjust(bottom=0.2, hspace=0)
cbar_ax = fig.add_axes([0.15, 0.08, 0.7, 0.04])
plt.colorbar(cax=cbar_ax, ticks=[], orientation='horizontal')
fig.text(0.35, 0.15, "Relative responding intensity")
fig.text(0.095, 0.09, "Low")
fig.text(0.87, 0.09, "High")
# save plot
plt.savefig(figure_path + "%s_run_figure_compile.png" % subid)
# close pyplot window
plt.close()
print ("plots for analysis are saved as figures")
print ("Progress: Run correlation coefficient")
# create a deepcopy of raw_beta_vols for correlation analysis:
raw_beta_vols_corr = copy.deepcopy(raw_beta_vols)
# flatten the 2D matrix
house1 = np.ravel(raw_beta_vols_corr["%s_run001" % subid][:, 25:50, slice_number, match_para["house"]])
house2 = np.ravel(raw_beta_vols_corr["%s_run002" % subid][:, 25:50, slice_number, match_para["house"]])
face2 = np.ravel(raw_beta_vols_corr["%s_run002" % subid][:, 25:50, slice_number, match_para["face"]])
# save flatten results for further analysis
np.savetxt(file_path + "%s_house1.txt" % subid, house1)
np.savetxt(file_path + "%s_house2.txt" % subid, house2)
np.savetxt(file_path + "%s_face2.txt" % subid, face2)
# change nan to 0 in the array
house1[np.isnan(house1)] = 0
house2[np.isnan(house2)] = 0
face2[np.isnan(face2)] = 0
# correlation coefficient study:
house1_house2 = np.corrcoef(house1, house2)
house1_face2 = np.corrcoef(house1, face2)
print ("%s run1 house vs run2 house: %s" % (subid, house1_house2))
print ("%s run1 house vs run2 face : %s" % (subid, house1_face2))
print (sec_separator)
# save individual 2D slice as txt for further analysis
print ("save 2D result for each object and each run individually as txt file")
for i in range(1, run_num+1):
for item in object_list:
temp = raw_beta_vols_corr["%s_run0%02d" % (subid, i)][:, 25:50, slice_number, match_para[item]]
np.savetxt(file_path + "%s_run0%02d_%s.txt" % (subid, i, item), np.ravel(temp))
print ("Complete!!")
print (sec_separator)
# analyze based on odd runs even runs
print ("Progress: prepare data to run correlation based on odd runs and even runs:")
print ("Take average of odd run / even run results to deal with impacts of variations between runs")
even_run = {}
odd_run = {}
even_count = 0
odd_count = 0
# add up even run results / odd run results and take mean for each groups
for item in object_list:
    even_run[item] = np.zeros_like(raw_beta_vols_corr["%s_run001" % subid][:, 25:50, slice_number, 5])
    odd_run[item] = np.zeros_like(raw_beta_vols_corr["%s_run001" % subid][:, 25:50, slice_number, 5])
    # reset the run counters per object so each mean divides by that object's own run count
    even_count = 0
    odd_count = 0
print ("make average of odd run results:")
# add up odd run results
for i in range(1, run_num+1, 2):
temp = raw_beta_vols_corr["%s_run0%02d" % (subid, i)][:, 25:50, slice_number, match_para[item]]
temp[np.isnan(temp)] = 0
odd_run[item] += temp
odd_count += 1
print("odd runs: %d-%s" % (i, item))
print ("make average od even run results:")
# take mean
odd_run[item] = odd_run[item]/odd_count
# add up even run results
for i in range(2, run_num+1, 2):
temp = raw_beta_vols_corr["%s_run0%02d" % (subid, i)][:, 25:50, slice_number, match_para[item]]
temp[np.isnan(temp)] = 0
even_run[item] += temp
even_count += 1
print("even: %d, %s" % (i, item))
# take mean
even_run[item] = even_run[item]/even_count
print (separator)
# save odd run and even run results as txt file
print ("Progress: save flatten mean odd / even run results as txt files")
for key, fig in even_run.items():
np.savetxt(file_path + "%s_even_%s.txt" % (subid, key), np.ravel(fig))
for key, fig in odd_run.items():
np.savetxt(file_path + "%s_odd_%s.txt" % (subid, key), np.ravel(fig))
print ("odd run and even run results are saved as txt files!!!!!")
print (separator)
############################ 3D correlation ###################################
# check 3D:
print ("Focus on one 3D analysis, shape = [:, 25:50, 31:36]")
# put 3D slice of run1 house, run2 face, run2 house together
fig = plt.figure()
i = 1
run1_house = raw_beta_vols["%s_run001" % subid][:, 25:50, 33:38, match_para["house"]]
for z in range(5):
plt.subplot(3, 5, i, xticks=[], yticks=[])
plt.imshow(run1_house[:, :, z], interpolation="nearest", cmap=nice_cmap, alpha=0.5)
i += 1
if z == 2:
plt.title("%s_run1_house" % subid)
run2_house = raw_beta_vols["%s_run002" % subid][:, 25:50, 33:38, match_para["house"]]
for z in range(5):
plt.subplot(3, 5, i, xticks=[], yticks=[])
plt.imshow(run2_house[:, :, z], interpolation="nearest", cmap=nice_cmap, alpha=0.5)
i += 1
if z == 2:
plt.title("%s_run2_house" % subid)
run2_face = raw_beta_vols["%s_run002" % subid][:, 25:50, 33:38, match_para["face"]]
for z in range(5):
plt.subplot(3, 5, i, xticks=[], yticks=[])
plt.imshow(run2_face[:, :, z], interpolation="nearest", cmap=nice_cmap, alpha=0.5)
i += 1
if z == 2:
plt.title("%s_run2_face" % subid)
# label plot
fig.subplots_adjust(bottom=0.2, hspace=0.5)
cbar_ax = fig.add_axes([0.15, 0.06, 0.7, 0.02])
plt.colorbar(cax=cbar_ax, ticks=[], orientation='horizontal')
fig.text(0.35, 0.1, "Relative responding intensity")
fig.text(0.095, 0.07, "Low")
fig.text(0.87, 0.07, "High")
plt.savefig(figure_path + "Try_3D_correlation_%s.png" % subid)
plt.close()
# try to run 3D correlation study:
print ("Progress: Run correlation coefficient with 3D data")
# make a deepcopy of the raw_beta_vols for correlation study:
raw_beta_vols_3d_corr = copy.deepcopy(raw_beta_vols)
# get flatten 3D slice:
house1_3d = np.ravel(raw_beta_vols_3d_corr["%s_run001" % subid][:, 25:50, 33:38, match_para["house"]])
house2_3d = np.ravel(raw_beta_vols_3d_corr["%s_run002" % subid][:, 25:50, 33:38, match_para["house"]])
face2_3d = np.ravel(raw_beta_vols_3d_corr["%s_run002" % subid][:, 25:50, 33:38, match_para["face"]])
# change nan to 0 in the array
house1_3d[np.isnan(house1_3d)] = 0
house2_3d[np.isnan(house2_3d)] = 0
face2_3d[np.isnan(face2_3d)] = 0
# correlation coefficient study:
threeD_house1_house2 = np.corrcoef(house1_3d, house2_3d)
threeD_house1_face2 = np.corrcoef(house1_3d, face2_3d)
print ("%s run1 house vs run2 house in 3D: %s" % (subid, threeD_house1_house2))
print ("%s run1 house vs run2 face in 3D: %s" % (subid, threeD_house1_face2))
print (separator)
# prepare data to analyze 3D brain based on odd runs even runs
print ("Prepare data to analyze \"3D\" brain based on odd runs and even runs:")
print ("Take average of \"3D\" odd runs / even runs to deal with impacts of variations between runs")
even_run_3d = {}
odd_run_3d = {}
# add up even run results / odd run results and take mean for each groups
for item in object_list:
even_run_3d[item] = np.zeros_like(raw_beta_vols_3d_corr["%s_run001" % subid][:, 25:50, 33:38, match_para[item]])
odd_run_3d[item] = np.zeros_like(raw_beta_vols_3d_corr["%s_run001" % subid][:, 25:50, 33:38, match_para[item]])
print ("make average of \"3D\" odd run results:")
# add up odd runs results
for i in range(1, run_num+1, 2):
temp = raw_beta_vols_3d_corr["%s_run0%02d" % (subid, i)][:, 25:50, 33:38, match_para[item]]
temp[np.isnan(temp)] = 0
odd_run_3d[item] += temp
print("odd runs 3D: %d-%s" % (i, item))
# take mean
odd_run_3d[item] = odd_run_3d[item]/odd_count
print ("make average of \"3D\" even run results:")
# add up even runs results
for i in range(2, run_num+1, 2):
temp = raw_beta_vols_3d_corr["%s_run0%02d" % (subid, i)][:, 25:50, 33:38, match_para[item]]
temp[np.isnan(temp)] = 0
even_run_3d[item] += temp
print("even runs 3D: %d-%s" % (i, item))
# take mean
even_run_3d[item] = even_run_3d[item]/even_count
# save odd run and even run results as txt file
for key, fig in even_run_3d.items():
np.savetxt(file_path + "%s_even_%s_3d.txt" % (subid, key), np.ravel(fig))
for key, fig in odd_run_3d.items():
np.savetxt(file_path + "%s_odd_%s_3d.txt" % (subid, key), np.ravel(fig))
print ("\"3D\" odd run and even run results are saved as txt files!!!!!")
print (separator)
print ("Analysis and Data Pre-processing for %s : Complete!!!" % subid) | bsd-3-clause |
FabriceSalvaire/PySpice | examples/power-supplies/hp54501a-cem.py | 1 | 2377 | #r# ================
#r# CEM Simulation
#r# ================
#r# This example shows a CEM simulation.
# Fixme: retrieve PDF reference and complete
####################################################################################################
import matplotlib.pyplot as plt
####################################################################################################
import PySpice.Logging.Logging as Logging
logger = Logging.setup_logging()
####################################################################################################
from PySpice.Doc.ExampleTools import find_libraries
from PySpice.Probe.Plot import plot
from PySpice.Spice.Library import SpiceLibrary
from PySpice.Spice.Netlist import Circuit
from PySpice.Unit import *
####################################################################################################
libraries_path = find_libraries()
spice_library = SpiceLibrary(libraries_path)
####################################################################################################
from HP54501A import HP54501A
#f# literal_include('HP54501A.py')
####################################################################################################
circuit = Circuit('HP54501A CEM')
circuit.include(spice_library['1N4148'])
diode_model = '1N4148'
ac_line = circuit.AcLine('input', 'input', circuit.gnd, rms_voltage=230@u_V, frequency=50@u_Hz)
# circuit.subcircuit(HP54501A(diode_model='1N4148'))
# circuit.X('hp54501a', 'HP54501A', 'input', circuit.gnd)
circuit.C(1, 'input', circuit.gnd, 1@u_uF)
circuit.X('D1', diode_model, 'line_plus', 'top')
circuit.X('D2', diode_model, 'scope_ground', 'input')
circuit.X('D3', diode_model, circuit.gnd, 'top')
circuit.X('D4', diode_model, 'scope_ground', circuit.gnd)
circuit.R(1, 'top', 'output', 10@u_Ω)
circuit.C(2, 'output', 'scope_ground', 50@u_uF)
circuit.R(2, 'output', 'scope_ground', 900@u_Ω)
simulator = circuit.simulator(temperature=25, nominal_temperature=25)
analysis = simulator.transient(step_time=ac_line.period/100, end_time=ac_line.period*3)
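# Added note (illustrative): with a 50 Hz source the period is 20 ms, so the
# transient analysis above covers three periods (60 ms) sampled every 0.2 ms.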
figure, ax = plt.subplots(figsize=(20, 6))
ax.plot(analysis.input)
ax.plot(analysis.Vinput)
ax.plot(analysis.output - analysis.scope_ground)
ax.legend(('Vin [V]', 'I [A]'), loc=(.8,.8))
ax.grid()
ax.set_xlabel('t [s]')
ax.set_ylabel('[V]')
plt.show()
#f# save_figure('figure', 'hp54501a-cem.png')
| gpl-3.0 |
gotomypc/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 142 | 6276 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
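# scale the pixel values into [0, 1], the range BernoulliRBM expects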
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# you need that much iters
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples(np.arange(1000) * 100)
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
SimonBiggs/pymephisto | pymephisto/csvoutput.py | 1 | 2774 | # Copyright (C) 2015 Simon Biggs
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see
# http://www.gnu.org/licenses/.
import os
import numpy as np
import pandas as pd
def file_output(output_directory, distance, relative_dose,
scan_curvetype, scan_depth):
"""Store the loaded mephisto data into csv files for easy user confirmation
and use.
"""
# Determines the filepaths for the output
filepaths = determine_output_filepaths(
output_directory, scan_curvetype, scan_depth)
columns = ['distance (mm)', 'relative dose']
# Loop over each curvetype and save the data to csv
for i, curvetype in enumerate(scan_curvetype):
# Stacks the data into one array and transposes into column orientation
data = np.vstack([distance[i], relative_dose[i]]).T
# Use pandas to save data to csv
df = pd.DataFrame(data, columns=columns)
df.to_csv(filepaths[i])
def determine_output_filepaths(output_directory, scan_curvetype, scan_depth):
"""Determine a useful filepath for the saving of each mephisto scan.
"""
filepaths = []
# Loop over each scan curvetype creating a relevant filepath
for i, curvetype in enumerate(scan_curvetype):
if curvetype == 'PDD':
# Create the filename to be pdd_[number].csv
filepaths.append(os.path.join(
output_directory, "pdd_[{0:d}].csv".format(i)))
elif curvetype == 'INPLANE_PROFILE':
# Create the filename to be inplaneprofile_depth_[number].csv
filepaths.append(os.path.join(
output_directory,
"inplaneprofile_{0:d}mm_[{1:d}].csv".format(
int(scan_depth[i]), i)))
elif curvetype == 'CROSSPLANE_PROFILE':
# Create the filename to be crossplaneprofile_depth_[number].csv
filepaths.append(os.path.join(
output_directory,
"crossplaneprofile_{0:d}mm_[{1:d}].csv".format(
int(scan_depth[i]), i)))
else:
# Raise an error if the curve type was not as expected
raise Exception("Unexpected scan_curvetype")
return filepaths
| agpl-3.0 |
zymsys/sms-tools | software/transformations_interface/stftMorph_function.py | 18 | 3291 | # function for doing a morph between two sounds using the stft
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../transformations/'))
import stft as STFT
import utilFunctions as UF
import stftTransformations as STFTT
def main(inputFile1='../../sounds/ocean.wav', inputFile2='../../sounds/speech-male.wav', window1='hamming', window2='hamming',
M1=1024, M2=1024, N1=1024, N2=1024, H1=256, smoothf = .5, balancef = 0.2):
"""
Function to perform a morph between two sounds
inputFile1: name of input sound file to be used as source
inputFile2: name of input sound file to be used as filter
window1 and window2: windows for both files
M1 and M2: window sizes for both files
N1 and N2: fft sizes for both sounds
H1: hop size for sound 1 (the one for sound 2 is computed automatically)
    smoothf: smoothing factor to be applied to magnitude spectrum of sound 2 before morphing
    balancef: balance factor between both sounds, 0 is sound 1 and 1 is sound 2
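
    Example of a call leaning towards sound 2 (added, illustrative; assumes the
    default sound files are available)::

        main(smoothf=0.2, balancef=0.7)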
"""
# read input sounds
(fs, x1) = UF.wavread(inputFile1)
(fs, x2) = UF.wavread(inputFile2)
# compute analysis windows
w1 = get_window(window1, M1)
w2 = get_window(window2, M2)
# perform morphing
y = STFTT.stftMorph(x1, x2, fs, w1, N1, w2, N2, H1, smoothf, balancef)
# compute the magnitude and phase spectrogram of input sound (for plotting)
mX1, pX1 = STFT.stftAnal(x1, fs, w1, N1, H1)
# compute the magnitude and phase spectrogram of output sound (for plotting)
mY, pY = STFT.stftAnal(y, fs, w1, N1, H1)
# write output sound
outputFile = 'output_sounds/' + os.path.basename(inputFile1)[:-4] + '_stftMorph.wav'
UF.wavwrite(y, fs, outputFile)
# create figure to plot
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 10000.0
# plot sound 1
plt.subplot(4,1,1)
plt.plot(np.arange(x1.size)/float(fs), x1)
plt.axis([0, x1.size/float(fs), min(x1), max(x1)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
# plot magnitude spectrogram of sound 1
plt.subplot(4,1,2)
numFrames = int(mX1[:,0].size)
frmTime = H1*np.arange(numFrames)/float(fs)
binFreq = fs*np.arange(N1*maxplotfreq/fs)/N1
plt.pcolormesh(frmTime, binFreq, np.transpose(mX1[:,:N1*maxplotfreq/fs+1]))
plt.xlabel('time (sec)')
plt.ylabel('frequency (Hz)')
plt.title('magnitude spectrogram of x')
plt.autoscale(tight=True)
# plot magnitude spectrogram of morphed sound
plt.subplot(4,1,3)
numFrames = int(mY[:,0].size)
frmTime = H1*np.arange(numFrames)/float(fs)
binFreq = fs*np.arange(N1*maxplotfreq/fs)/N1
plt.pcolormesh(frmTime, binFreq, np.transpose(mY[:,:N1*maxplotfreq/fs+1]))
plt.xlabel('time (sec)')
plt.ylabel('frequency (Hz)')
plt.title('magnitude spectrogram of y')
plt.autoscale(tight=True)
# plot the morphed sound
plt.subplot(4,1,4)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show()
if __name__ == '__main__':
main()
| agpl-3.0 |
mlyundin/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 78 | 34552 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
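
    Examples
    --------
    A small illustrative example (added; only shapes are shown to stay
    independent of numpy's array printing):

    >>> import numpy as np
    >>> D, ij = l1_cross_distances(np.array([[0.], [1.], [3.]]))
    >>> D.shape, ij.shape
    ((3, 1), (3, 2))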
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
    The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
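            # Added note: the BLUP is the regression mean f(x).beta corrected by
            # the correlation-weighted residual term r(x).gamma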
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
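        # Added note: as computed, detR equals det(R) ** (1. / n_samples), i.e. a
        # per-sample normalised determinant, which is what the concentrated
        # (reduced) likelihood below uses.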
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
            # Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget) < 0.:
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
| bsd-3-clause |
kaichogami/sympy | sympy/plotting/tests/test_plot.py | 6 | 9238 | from sympy import (pi, sin, cos, Symbol, Integral, Sum, sqrt, log,
oo, LambertW, I, meijerg, exp_polar, Max, Piecewise)
from sympy.plotting import (plot, plot_parametric, plot3d_parametric_line,
plot3d, plot3d_parametric_surface)
from sympy.plotting.plot import unset_show
from sympy.utilities.pytest import skip, raises
from sympy.plotting.experimental_lambdify import lambdify
from sympy.external import import_module
from sympy.core.decorators import wraps
from tempfile import NamedTemporaryFile
import os
import sys
class MockPrint(object):
def write(self, s):
pass
def flush(self):
pass
encoding = 'utf-8'
def disable_print(func, *args, **kwargs):
@wraps(func)
def wrapper(*args, **kwargs):
sys.stdout = MockPrint()
func(*args, **kwargs)
sys.stdout = sys.__stdout__
return wrapper
unset_show()
# XXX: We could implement this as a context manager instead
# That would need rewriting the plot_and_save() function
# entirely
class TmpFileManager:
tmp_files = []
@classmethod
def tmp_file(cls, name=''):
cls.tmp_files.append(NamedTemporaryFile(prefix=name, suffix='.png').name)
return cls.tmp_files[-1]
@classmethod
def cleanup(cls):
        list(map(os.remove, cls.tmp_files))  # list() so the files are removed on Python 3 too
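# A minimal sketch of the context-manager alternative mentioned in the XXX note
# above (added, illustrative only; plot_and_save() would need to be reworked):
#
#     from contextlib import contextmanager
#
#     @contextmanager
#     def tmp_file_manager():
#         tmp_files = []
#         def tmp_file(name=''):
#             tmp_files.append(NamedTemporaryFile(prefix=name, suffix='.png').name)
#             return tmp_files[-1]
#         try:
#             yield tmp_file
#         finally:
#             for f in tmp_files:
#                 os.remove(f)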
def plot_and_save(name):
tmp_file = TmpFileManager.tmp_file
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
###
# Examples from the 'introduction' notebook
###
p = plot(x)
p = plot(x*sin(x), x*cos(x))
p.extend(p)
p[0].line_color = lambda a: a
p[1].line_color = 'b'
p.title = 'Big title'
p.xlabel = 'the x axis'
p[1].label = 'straight line'
p.legend = True
p.aspect_ratio = (1, 1)
p.xlim = (-15, 20)
p.save(tmp_file('%s_basic_options_and_colors' % name))
p._backend.close()
p.extend(plot(x + 1))
p.append(plot(x + 3, x**2)[1])
p.save(tmp_file('%s_plot_extend_append' % name))
p[2] = plot(x**2, (x, -2, 3))
p.save(tmp_file('%s_plot_setitem' % name))
p._backend.close()
p = plot(sin(x), (x, -2*pi, 4*pi))
p.save(tmp_file('%s_line_explicit' % name))
p._backend.close()
p = plot(sin(x))
p.save(tmp_file('%s_line_default_range' % name))
p._backend.close()
p = plot((x**2, (x, -5, 5)), (x**3, (x, -3, 3)))
p.save(tmp_file('%s_line_multiple_range' % name))
p._backend.close()
raises(ValueError, lambda: plot(x, y))
p = plot(Piecewise((1, x > 0), (0, True)),(x,-1,1))
p.save(tmp_file('%s_plot_piecewise' % name))
p._backend.close()
#parametric 2d plots.
#Single plot with default range.
plot_parametric(sin(x), cos(x)).save(tmp_file())
#Single plot with range.
p = plot_parametric(sin(x), cos(x), (x, -5, 5))
p.save(tmp_file('%s_parametric_range' % name))
p._backend.close()
#Multiple plots with same range.
p = plot_parametric((sin(x), cos(x)), (x, sin(x)))
p.save(tmp_file('%s_parametric_multiple' % name))
p._backend.close()
#Multiple plots with different ranges.
p = plot_parametric((sin(x), cos(x), (x, -3, 3)), (x, sin(x), (x, -5, 5)))
p.save(tmp_file('%s_parametric_multiple_ranges' % name))
p._backend.close()
#depth of recursion specified.
p = plot_parametric(x, sin(x), depth=13)
p.save(tmp_file('%s_recursion_depth' % name))
p._backend.close()
#No adaptive sampling.
p = plot_parametric(cos(x), sin(x), adaptive=False, nb_of_points=500)
p.save(tmp_file('%s_adaptive' % name))
p._backend.close()
#3d parametric plots
p = plot3d_parametric_line(sin(x), cos(x), x)
p.save(tmp_file('%s_3d_line' % name))
p._backend.close()
p = plot3d_parametric_line(
(sin(x), cos(x), x, (x, -5, 5)), (cos(x), sin(x), x, (x, -3, 3)))
p.save(tmp_file('%s_3d_line_multiple' % name))
p._backend.close()
p = plot3d_parametric_line(sin(x), cos(x), x, nb_of_points=30)
p.save(tmp_file('%s_3d_line_points' % name))
p._backend.close()
# 3d surface single plot.
p = plot3d(x * y)
p.save(tmp_file('%s_surface' % name))
p._backend.close()
# Multiple 3D plots with same range.
p = plot3d(-x * y, x * y, (x, -5, 5))
p.save(tmp_file('%s_surface_multiple' % name))
p._backend.close()
# Multiple 3D plots with different ranges.
p = plot3d(
(x * y, (x, -3, 3), (y, -3, 3)), (-x * y, (x, -3, 3), (y, -3, 3)))
p.save(tmp_file('%s_surface_multiple_ranges' % name))
p._backend.close()
# Single Parametric 3D plot
p = plot3d_parametric_surface(sin(x + y), cos(x - y), x - y)
p.save(tmp_file('%s_parametric_surface' % name))
p._backend.close()
# Multiple Parametric 3D plots.
p = plot3d_parametric_surface(
(x*sin(z), x*cos(z), z, (x, -5, 5), (z, -5, 5)),
(sin(x + y), cos(x - y), x - y, (x, -5, 5), (y, -5, 5)))
p.save(tmp_file('%s_parametric_surface' % name))
p._backend.close()
###
# Examples from the 'colors' notebook
###
p = plot(sin(x))
p[0].line_color = lambda a: a
p.save(tmp_file('%s_colors_line_arity1' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_line_arity2' % name))
p._backend.close()
p = plot(x*sin(x), x*cos(x), (x, 0, 10))
p[0].line_color = lambda a: a
p.save(tmp_file('%s_colors_param_line_arity1' % name))
p[0].line_color = lambda a, b: a
p.save(tmp_file('%s_colors_param_line_arity2a' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_param_line_arity2b' % name))
p._backend.close()
p = plot3d_parametric_line(sin(x) + 0.1*sin(x)*cos(7*x),
cos(x) + 0.1*cos(x)*cos(7*x),
0.1*sin(7*x),
(x, 0, 2*pi))
p[0].line_color = lambda a: sin(4*a)
p.save(tmp_file('%s_colors_3d_line_arity1' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_3d_line_arity2' % name))
p[0].line_color = lambda a, b, c: c
p.save(tmp_file('%s_colors_3d_line_arity3' % name))
p._backend.close()
p = plot3d(sin(x)*y, (x, 0, 6*pi), (y, -5, 5))
p[0].surface_color = lambda a: a
p.save(tmp_file('%s_colors_surface_arity1' % name))
p[0].surface_color = lambda a, b: b
p.save(tmp_file('%s_colors_surface_arity2' % name))
p[0].surface_color = lambda a, b, c: c
p.save(tmp_file('%s_colors_surface_arity3a' % name))
p[0].surface_color = lambda a, b, c: sqrt((a - 3*pi)**2 + b**2)
p.save(tmp_file('%s_colors_surface_arity3b' % name))
p._backend.close()
p = plot3d_parametric_surface(x * cos(4 * y), x * sin(4 * y), y,
(x, -1, 1), (y, -1, 1))
p[0].surface_color = lambda a: a
p.save(tmp_file('%s_colors_param_surf_arity1' % name))
p[0].surface_color = lambda a, b: a*b
p.save(tmp_file('%s_colors_param_surf_arity2' % name))
p[0].surface_color = lambda a, b, c: sqrt(a**2 + b**2 + c**2)
p.save(tmp_file('%s_colors_param_surf_arity3' % name))
p._backend.close()
###
# Examples from the 'advanced' notebook
###
i = Integral(log((sin(x)**2 + 1)*sqrt(x**2 + 1)), (x, 0, y))
p = plot(i, (y, 1, 5))
p.save(tmp_file('%s_advanced_integral' % name))
p._backend.close()
s = Sum(1/x**y, (x, 1, oo))
p = plot(s, (y, 2, 10))
p.save(tmp_file('%s_advanced_inf_sum' % name))
p._backend.close()
p = plot(Sum(1/x, (x, 1, y)), (y, 2, 10), show=False)
p[0].only_integers = True
p[0].steps = True
p.save(tmp_file('%s_advanced_fin_sum' % name))
p._backend.close()
###
# Test expressions that can not be translated to np and generate complex
# results.
###
plot(sin(x) + I*cos(x)).save(tmp_file())
plot(sqrt(sqrt(-x))).save(tmp_file())
plot(LambertW(x)).save(tmp_file())
plot(sqrt(LambertW(x))).save(tmp_file())
#Characteristic function of a StudentT distribution with nu=10
plot((meijerg(((1 / 2,), ()), ((5, 0, 1 / 2), ()), 5 * x**2 * exp_polar(-I*pi)/2)
+ meijerg(((1/2,), ()), ((5, 0, 1/2), ()),
5*x**2 * exp_polar(I*pi)/2)) / (48 * pi), (x, 1e-6, 1e-2)).save(tmp_file())
def test_matplotlib():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
try:
plot_and_save('test')
finally:
# clean up
TmpFileManager.cleanup()
else:
skip("Matplotlib not the default backend")
# Tests for exception handling in experimental_lambdify
def test_experimental_lambify():
x = Symbol('x')
f = lambdify([x], Max(x, 5))
# XXX should f be tested? If f(2) is attempted, an
# error is raised because a complex produced during wrapping of the arg
# is being compared with an int.
assert Max(2, 5) == 5
assert Max(5, 7) == 7
x = Symbol('x-3')
f = lambdify([x], x + 1)
assert f(1) == 2
@disable_print
def test_append_issue_7140():
x = Symbol('x')
p1 = plot(x)
p2 = plot(x**2)
p3 = plot(x + 2)
# append a series
p2.append(p1[0])
assert len(p2._series) == 2
with raises(TypeError):
p1.append(p2)
with raises(TypeError):
p1.append(p2._series)
| bsd-3-clause |
HolgerPeters/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 87 | 3903 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.externals.joblib import Memory
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_.reshape(1, -1))
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
ANNarchy/ANNarchy | examples/vogels_abbott/COBA.py | 2 | 1940 | from ANNarchy import *
setup(dt=0.1)
# ###########################################
# Neuron model
# ###########################################
COBA = Neuron(
parameters="""
El = -60.0 : population
Vr = -60.0 : population
Erev_exc = 0.0 : population
Erev_inh = -80.0 : population
Vt = -50.0 : population
tau = 20.0 : population
tau_exc = 5.0 : population
tau_inh = 10.0 : population
I = 20.0 : population
""",
equations="""
tau * dv/dt = (El - v) + g_exc * (Erev_exc - v) + g_inh * (Erev_inh - v ) + I
tau_exc * dg_exc/dt = - g_exc
tau_inh * dg_inh/dt = - g_inh
""",
spike = "v > Vt",
reset = "v = Vr",
refractory = 5.0
)
# ###########################################
# Create population
# ###########################################
P = Population(geometry=4000, neuron=COBA)
Pe = P[:3200]
Pi = P[3200:]
P.v = Normal(-55.0, 5.0)
P.g_exc = Normal(4.0, 1.5)
P.g_inh = Normal(20.0, 12.0)
# ###########################################
# Connect the network
# ###########################################
Ce = Projection(pre=Pe, post=P, target='exc')
Ce.connect_fixed_probability(weights=0.6, probability=0.02)
Ci = Projection(pre=Pi, post=P, target='inh')
Ci.connect_fixed_probability(weights=6.7, probability=0.02)
compile()
# ###########################################
# Simulate
# ###########################################
m = Monitor(P, ['spike'])
simulate(1000.0, measure_time=True)
data = m.get('spike')
# ###########################################
# Make plots
# ###########################################
t, n = m.raster_plot(data)
print('Mean firing rate in the population: ' + str(len(t) / 4000.) + 'Hz')
import matplotlib.pyplot as plt
plt.plot(t, n, '.', markersize=0.5)
plt.xlabel('Time (ms)')
plt.ylabel('# neuron')
plt.show()
| gpl-2.0 |
sibis-platform/ncanda-datacore | datadict/datadict_utils.py | 2 | 2126 | import pandas as pd
from six import string_types
def load_datadict(filepath, trim_index=True, trim_all=False, force_names=False):
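    """Load a REDCap-style data dictionary CSV into a pandas DataFrame.

    The first column ("Variable / Field Name") becomes the index, and every
    remaining column must be drawn from the expected REDCap header names;
    with force_names=True the expected names are imposed instead of asserted.
    trim_index / trim_all strip surrounding whitespace from the index or from
    every string cell.
    """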
# TODO: Verify headers / set column names, if necessary
headers = ["Variable / Field Name",
"Form Name",
"Section Header",
"Field Type",
"Field Label",
"Choices, Calculations, OR Slider Labels",
"Field Note",
"Text Validation Type OR Show Slider Number",
"Text Validation Min",
"Text Validation Max",
"Identifier?",
"Branching Logic (Show field only if...)",
"Required Field?",
"Custom Alignment",
"Question Number (surveys only)",
"Matrix Group Name",
"Matrix Ranking?",
"Field Annotation"]
index_name = headers[0]
headers = headers[1:]
df = pd.read_csv(filepath, index_col=0, dtype=object, engine="python")
try:
assert df.index.name == index_name, F"Index must be named {index_name}"
assert df.columns.isin(headers).all(), "Nonstandard headers!"
except AssertionError:
if force_names:
df.index.name = index_name
df.columns = headers
else:
raise
if trim_index:
df.index = df.index.to_series().str.strip()
if trim_all:
df = df.applymap(lambda x: x.strip() if isinstance(x, string_types) else x)
return df
def insert_rows_at(main_df, index_name, inserted_df, insert_before=False):
# Not checking if index exists because that will be apparent from error
# NOTE: This will not work with duplicate indices
pre_df = main_df.loc[:index_name]
post_df = main_df.loc[index_name:]
# Both pre_ and post_ contain the value at index_name, so one needs to
# drop it
if not insert_before:
pre_df = pre_df.drop(index_name)
else:
post_df = post_df.drop(index_name)
return pd.concat([pre_df, inserted_df, post_df],
sort=False,
axis=0)
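# ---------------------------------------------------------------------------
# Usage sketch (illustration only; the file name, variable names and row
# content below are hypothetical, not part of this module):
#
#   dd = load_datadict("datadict.csv")
#   new_rows = pd.DataFrame(
#       [{"Form Name": "demographics", "Field Type": "text"}],
#       index=pd.Index(["new_var"], name=dd.index.name))
#   dd = insert_rows_at(dd, "existing_var", new_rows, insert_before=True)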
| bsd-3-clause |
petebachant/seaborn | seaborn/tests/test_palettes.py | 22 | 9613 | import colorsys
import numpy as np
import matplotlib as mpl
import nose.tools as nt
import numpy.testing as npt
from .. import palettes, utils, rcmod, husl
from ..xkcd_rgb import xkcd_rgb
from ..crayons import crayons
class TestColorPalettes(object):
def test_current_palette(self):
pal = palettes.color_palette(["red", "blue", "green"], 3)
rcmod.set_palette(pal, 3)
nt.assert_equal(pal, mpl.rcParams["axes.color_cycle"])
rcmod.set()
def test_palette_context(self):
default_pal = palettes.color_palette()
context_pal = palettes.color_palette("muted")
with palettes.color_palette(context_pal):
nt.assert_equal(mpl.rcParams["axes.color_cycle"], context_pal)
nt.assert_equal(mpl.rcParams["axes.color_cycle"], default_pal)
def test_big_palette_context(self):
original_pal = palettes.color_palette("deep", n_colors=8)
context_pal = palettes.color_palette("husl", 10)
rcmod.set_palette(original_pal)
with palettes.color_palette(context_pal, 10):
nt.assert_equal(mpl.rcParams["axes.color_cycle"], context_pal)
nt.assert_equal(mpl.rcParams["axes.color_cycle"], original_pal)
# Reset default
rcmod.set()
def test_seaborn_palettes(self):
pals = "deep", "muted", "pastel", "bright", "dark", "colorblind"
for name in pals:
pal_out = palettes.color_palette(name)
nt.assert_equal(len(pal_out), 6)
def test_hls_palette(self):
hls_pal1 = palettes.hls_palette()
hls_pal2 = palettes.color_palette("hls")
npt.assert_array_equal(hls_pal1, hls_pal2)
def test_husl_palette(self):
husl_pal1 = palettes.husl_palette()
husl_pal2 = palettes.color_palette("husl")
npt.assert_array_equal(husl_pal1, husl_pal2)
def test_mpl_palette(self):
mpl_pal1 = palettes.mpl_palette("Reds")
mpl_pal2 = palettes.color_palette("Reds")
npt.assert_array_equal(mpl_pal1, mpl_pal2)
def test_mpl_dark_palette(self):
mpl_pal1 = palettes.mpl_palette("Blues_d")
mpl_pal2 = palettes.color_palette("Blues_d")
npt.assert_array_equal(mpl_pal1, mpl_pal2)
def test_bad_palette_name(self):
with nt.assert_raises(ValueError):
palettes.color_palette("IAmNotAPalette")
def test_terrible_palette_name(self):
with nt.assert_raises(ValueError):
palettes.color_palette("jet")
def test_bad_palette_colors(self):
pal = ["red", "blue", "iamnotacolor"]
with nt.assert_raises(ValueError):
palettes.color_palette(pal)
def test_palette_desat(self):
pal1 = palettes.husl_palette(6)
pal1 = [utils.desaturate(c, .5) for c in pal1]
pal2 = palettes.color_palette("husl", desat=.5)
npt.assert_array_equal(pal1, pal2)
def test_palette_is_list_of_tuples(self):
pal_in = np.array(["red", "blue", "green"])
pal_out = palettes.color_palette(pal_in, 3)
nt.assert_is_instance(pal_out, list)
nt.assert_is_instance(pal_out[0], tuple)
nt.assert_is_instance(pal_out[0][0], float)
nt.assert_equal(len(pal_out[0]), 3)
def test_palette_cycles(self):
deep = palettes.color_palette("deep")
double_deep = palettes.color_palette("deep", 12)
nt.assert_equal(double_deep, deep + deep)
def test_hls_values(self):
pal1 = palettes.hls_palette(6, h=0)
pal2 = palettes.hls_palette(6, h=.5)
pal2 = pal2[3:] + pal2[:3]
npt.assert_array_almost_equal(pal1, pal2)
pal_dark = palettes.hls_palette(5, l=.2)
pal_bright = palettes.hls_palette(5, l=.8)
npt.assert_array_less(list(map(sum, pal_dark)),
list(map(sum, pal_bright)))
pal_flat = palettes.hls_palette(5, s=.1)
pal_bold = palettes.hls_palette(5, s=.9)
npt.assert_array_less(list(map(np.std, pal_flat)),
list(map(np.std, pal_bold)))
def test_husl_values(self):
pal1 = palettes.husl_palette(6, h=0)
pal2 = palettes.husl_palette(6, h=.5)
pal2 = pal2[3:] + pal2[:3]
npt.assert_array_almost_equal(pal1, pal2)
pal_dark = palettes.husl_palette(5, l=.2)
pal_bright = palettes.husl_palette(5, l=.8)
npt.assert_array_less(list(map(sum, pal_dark)),
list(map(sum, pal_bright)))
pal_flat = palettes.husl_palette(5, s=.1)
pal_bold = palettes.husl_palette(5, s=.9)
npt.assert_array_less(list(map(np.std, pal_flat)),
list(map(np.std, pal_bold)))
def test_cbrewer_qual(self):
pal_short = palettes.mpl_palette("Set1", 4)
pal_long = palettes.mpl_palette("Set1", 6)
nt.assert_equal(pal_short, pal_long[:4])
pal_full = palettes.mpl_palette("Set2", 8)
pal_long = palettes.mpl_palette("Set2", 10)
nt.assert_equal(pal_full, pal_long[:8])
def test_mpl_reversal(self):
pal_forward = palettes.mpl_palette("BuPu", 6)
pal_reverse = palettes.mpl_palette("BuPu_r", 6)
nt.assert_equal(pal_forward, pal_reverse[::-1])
def test_rgb_from_hls(self):
color = .5, .8, .4
rgb_got = palettes._color_to_rgb(color, "hls")
rgb_want = colorsys.hls_to_rgb(*color)
nt.assert_equal(rgb_got, rgb_want)
def test_rgb_from_husl(self):
color = 120, 50, 40
rgb_got = palettes._color_to_rgb(color, "husl")
rgb_want = husl.husl_to_rgb(*color)
nt.assert_equal(rgb_got, rgb_want)
def test_rgb_from_xkcd(self):
color = "dull red"
rgb_got = palettes._color_to_rgb(color, "xkcd")
rgb_want = xkcd_rgb[color]
nt.assert_equal(rgb_got, rgb_want)
def test_light_palette(self):
pal_forward = palettes.light_palette("red")
pal_reverse = palettes.light_palette("red", reverse=True)
npt.assert_array_almost_equal(pal_forward, pal_reverse[::-1])
red = tuple(mpl.colors.colorConverter.to_rgba("red"))
nt.assert_equal(tuple(pal_forward[-1]), red)
pal_cmap = palettes.light_palette("blue", as_cmap=True)
nt.assert_is_instance(pal_cmap, mpl.colors.LinearSegmentedColormap)
def test_dark_palette(self):
pal_forward = palettes.dark_palette("red")
pal_reverse = palettes.dark_palette("red", reverse=True)
npt.assert_array_almost_equal(pal_forward, pal_reverse[::-1])
red = tuple(mpl.colors.colorConverter.to_rgba("red"))
nt.assert_equal(tuple(pal_forward[-1]), red)
pal_cmap = palettes.dark_palette("blue", as_cmap=True)
nt.assert_is_instance(pal_cmap, mpl.colors.LinearSegmentedColormap)
def test_blend_palette(self):
colors = ["red", "yellow", "white"]
pal_cmap = palettes.blend_palette(colors, as_cmap=True)
nt.assert_is_instance(pal_cmap, mpl.colors.LinearSegmentedColormap)
def test_cubehelix_against_matplotlib(self):
x = np.linspace(0, 1, 8)
mpl_pal = mpl.cm.cubehelix(x)[:, :3].tolist()
sns_pal = palettes.cubehelix_palette(8, start=0.5, rot=-1.5, hue=1,
dark=0, light=1, reverse=True)
nt.assert_list_equal(sns_pal, mpl_pal)
def test_cubehelix_n_colors(self):
for n in [3, 5, 8]:
pal = palettes.cubehelix_palette(n)
nt.assert_equal(len(pal), n)
def test_cubehelix_reverse(self):
pal_forward = palettes.cubehelix_palette()
pal_reverse = palettes.cubehelix_palette(reverse=True)
nt.assert_list_equal(pal_forward, pal_reverse[::-1])
def test_cubehelix_cmap(self):
cmap = palettes.cubehelix_palette(as_cmap=True)
nt.assert_is_instance(cmap, mpl.colors.ListedColormap)
pal = palettes.cubehelix_palette()
x = np.linspace(0, 1, 6)
npt.assert_array_equal(cmap(x)[:, :3], pal)
cmap_rev = palettes.cubehelix_palette(as_cmap=True, reverse=True)
x = np.linspace(0, 1, 6)
pal_forward = cmap(x).tolist()
pal_reverse = cmap_rev(x[::-1]).tolist()
nt.assert_list_equal(pal_forward, pal_reverse)
def test_xkcd_palette(self):
names = list(xkcd_rgb.keys())[10:15]
colors = palettes.xkcd_palette(names)
for name, color in zip(names, colors):
as_hex = mpl.colors.rgb2hex(color)
nt.assert_equal(as_hex, xkcd_rgb[name])
def test_crayon_palette(self):
names = list(crayons.keys())[10:15]
colors = palettes.crayon_palette(names)
for name, color in zip(names, colors):
as_hex = mpl.colors.rgb2hex(color)
nt.assert_equal(as_hex, crayons[name].lower())
def test_color_codes(self):
palettes.set_color_codes("deep")
colors = palettes.color_palette("deep") + [".1"]
for code, color in zip("bgrmyck", colors):
rgb_want = mpl.colors.colorConverter.to_rgb(color)
rgb_got = mpl.colors.colorConverter.to_rgb(code)
nt.assert_equal(rgb_want, rgb_got)
palettes.set_color_codes("reset")
def test_as_hex(self):
pal = palettes.color_palette("deep")
for rgb, hex in zip(pal, pal.as_hex()):
nt.assert_equal(mpl.colors.rgb2hex(rgb), hex)
def test_preserved_palette_length(self):
pal_in = palettes.color_palette("Set1", 10)
pal_out = palettes.color_palette(pal_in)
nt.assert_equal(pal_in, pal_out)
| bsd-3-clause |
brodoll/sms-tools | lectures/04-STFT/plots-code/window-size.py | 22 | 1498 | import math
import matplotlib.pyplot as plt
import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DF
import utilFunctions as UF
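# Compare DFT analysis of the same oboe fragment with a short (M=128) and a
# long (M=1024) Hamming window; the longer window trades time resolution for
# finer frequency resolution.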
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 128
start = int(.81*fs)
x1 = x[start:start+N]
plt.figure(1, figsize=(9.5, 6))
plt.subplot(321)
plt.plot(np.arange(start, (start+N), 1.0)/fs, x1*np.hamming(N), 'b', lw=1.5)
plt.axis([start/fs, (start+N)/fs, min(x1*np.hamming(N)), max(x1*np.hamming(N))])
plt.title('x1, M = 128')
mX, pX = DF.dftAnal(x1, np.hamming(N), N)
plt.subplot(323)
plt.plot((fs/2.0)*np.arange(mX.size)/float(mX.size), mX, 'r', lw=1.5)
plt.axis([0,fs/2.0,-90,max(mX)])
plt.title('mX1')
plt.subplot(325)
plt.plot((fs/2.0)*np.arange(mX.size)/float(mX.size), pX, 'c', lw=1.5)
plt.axis([0,fs/2.0,min(pX),max(pX)])
plt.title('pX1')
N = 1024
start = int(.81*fs)
x2 = x[start:start+N]
mX, pX = DF.dftAnal(x2, np.hamming(N), N)
plt.subplot(322)
plt.plot(np.arange(start, (start+N), 1.0)/fs, x2*np.hamming(N), 'b', lw=1.5)
plt.axis([start/fs, (start+N)/fs, min(x2), max(x2)])
plt.title('x2, M = 1024')
plt.subplot(324)
plt.plot((fs/2.0)*np.arange(mX.size)/float(mX.size), mX, 'r', lw=1.5)
plt.axis([0,fs/2.0,-90,max(mX)])
plt.title('mX2')
plt.subplot(326)
plt.plot((fs/2.0)*np.arange(mX.size)/float(mX.size), pX, 'c', lw=1.5)
plt.axis([0,fs/2.0,min(pX),max(pX)])
plt.title('pX2')
plt.tight_layout()
plt.savefig('window-size.png')
plt.show()
| agpl-3.0 |
numeristical/introspective | ml_insights/splinecalib.py | 1 | 20123 | import numpy as np
from .calibration_utils import _natural_cubic_spline_basis_expansion
from .calibration_utils import create_yeqx_bias_vectors, logreg_cv
import random
import warnings
import matplotlib.pyplot as plt
class SplineCalib(object):
"""Probability calibration using cubic splines.
This defines a calibrator object. The calibrator can be `fit` on
model outputs and truth values. After being fit, it can then be used to
`calibrate` model outputs.
This is similar to the sklearn fit/transform paradigm, except that it is
intended for post-processing of model outputs rather than preprocessing
of model inputs.
Parameters
----------
penalty : 'l1' or 'l2'
What kind of coefficient penalization to use. The default is 'l2',
which does a standard "smoothing" spline that minimizes the second
derivative of the resulting function. An 'l1' penalty will typically
give function which has a sparser representation in the spline bases.
The 'l1' penalty can only be used with the 'liblinear' or 'saga'
solver and tends to be considerably slower.
solver : 'lbfgs','liblinear','newton-cg','sag','saga'
Which solver to use in the sklearn LogisticRegression object that
powers the spline fitting. Specifying 'default' will use the 'lbfgs'
for L2 penalty and 'liblinear' for L1.
knot_sample_size :
The number of knots to randomly sample from the training
values. More knots take longer to fit. Too few knots may underfit.
Too many knots could overfit, but usually the regularization will
control that from happening. If `knot_sample_size` exceeds the number
of unique values in the input, then all unique values will be chosen.
add_knots :
A list (or np_array) of knots that will be used for the
spline fitting in addition to the random sample. This may be useful
if you want to force certain knots to be used in areas where the data
is sparse.
reg_param_vec :
A list (or np_array) of values to try for the 'C' parameter
for regularization in the sklearn LogisticRegression. These should
be positive numbers on a logarithmic scale for best results. If
'default' is chosen it will try 17 evenly spaced values (log scale)
between .0001 and 10000 (inclusive)
cv_spline :
Number of folds to use for the cross-validation to find the
best regularization parameter. Default is 5. Folds are chosen
in a stratified manner.
random_state :
If desired, can specify the random state for the generation
of the stratified folds.
unity_prior :
If True, routine will add synthetic data along the axis y=x as
a "prior" distribution that favors that function. Default is False.
unity_prior_weight :
The total weight of data points added when unity_prior
is set to True. Bigger values will force the calibration curve closer
to the line y=x.
unity_prior_gridsize :
The resolution of the grid used to create the
unity_prior data augmentation. Default is 100, meaning it would
create synthetic data at x=0, .01 ,.02 ,...,.99 ,1.
logodds_scale :
Whether or not to transform the x-values to the log odds
scale before doing the basis expansion. Default is True and is
recommended unless it is suspected that the uncalibrated probabilities
already have a logistic relationship to the true probabilities.
logodds_eps :
Used only when logodds_scale=True. Since 0 and 1 map to
positive and negative infinity on the logodds scale, we must
specify a minimum and maximum probability before the transformation.
Default is 'auto' which chooses a reasonable value based on the
smallest positive value seen and the largest value smaller than 1.
reg_prec :
A positive integer designating the number of decimal places to
which to round the log_loss when choosing the best regularization
parameter. Algorithm breaks ties in favor of more regularization.
Higher numbers will potentially use less regularization and lower
numbers use more regularization. Default is 4.
force_knot_endpts :
If True, the smallest and largest input value will
automatically chosen as knots, and `knot_sample_size`-2 knots
will be chosen among the remaining values. Default is True.
Attributes
----------
n_classes: The number of classes for which the calibrator was fit.
knot_vec: The knots chosen (on the probability scale).
knot_vec_tr: If logodds_scale = True, this will be the values of
the knots on the logodds scale. Otherwise it is the same
as knot_vec.
lrobj: (binary) The resulting sklearn LogisticRegression object that
is applied to the natural cubic spline basis. This
is not used directly, but provided for reference.
binary_splinecalibs: (multiclass) A list of the binary splinecalib objects
used to do the multiclass calibration, indexed by class number.
References
----------
Lucena, B. Spline-Based Probability Calibration. https://arxiv.org/abs/1809.07751
"""
def __init__(self, penalty='l2', solver='default',
knot_sample_size = 30, add_knots = None,
reg_param_vec = 'default', cv_spline=5, random_state=42,
unity_prior=False, unity_prior_gridsize=100,
unity_prior_weight=100, max_iter=1000, tol=.0001,
logodds_scale=True, logodds_eps='auto', reg_prec=4,
force_knot_endpts=True):
self.knot_sample_size = knot_sample_size
self.add_knots = add_knots
if (type(reg_param_vec)==str) and (reg_param_vec == 'default'):
self.reg_param_vec = np.logspace(-4, 4, 17)
else:
self.reg_param_vec = np.array(reg_param_vec)
self.cv_spline = cv_spline
self.random_state = random_state
self.max_iter = max_iter
self.tol = tol
self.penalty = penalty
self.unity_prior = unity_prior
self.unity_prior_weight = unity_prior_weight
self.unity_prior_gridsize = unity_prior_gridsize
self.reg_prec = reg_prec
self.force_knot_endpts = force_knot_endpts
self.logodds_scale = logodds_scale
        if (type(logodds_eps)==str) and (logodds_eps=='auto'):
self.logodds_eps_auto = True
self.logodds_eps = .001
else:
self.logodds_eps = logodds_eps
self.logodds_eps_auto=False
self.solver = solver
if (penalty=='l1') and not (solver in ['liblinear','saga']):
if (solver!='default'):
warnings.warn('L1 penalty requires "liblinear" or "saga" solver.\nUsing "liblinear"')
self.solver = 'liblinear'
if (penalty=='l2') and (solver=='default'):
self.solver = 'lbfgs'
def fit(self, y_model, y_true, verbose=False):
"""Fit the calibrator given a set of predictions and truth values.
This method will fit the calibrator. It handles both binary and
multiclass problems.
Parameters
----------
        y_model : array-like, shape (n_samples, n_classes)
            Model outputs on which to perform calibration.
            If passed a 1-d array of length (n_samples) this will be presumed
            to mean binary classification and the inputs presumed to be the
            probability of class "1".
            If passed a 2-d array, it is assumed to be a multiclass calibration
            where the number of classes is n_classes. Binary problems may
            take 1-d or 2-d arrays as y_model.
y_true : array-like, shape (n_samples)
Truth values to calibrate against. Values must be integers
between 0 and n_classes-1
"""
if len(y_model.shape)>1:
# 2-d array
            self.n_classes = y_model.shape[1]
else:
self.n_classes = 2
if self.n_classes > 2:
self._fit_multiclass(y_model, y_true, verbose=verbose)
else:
if len(y_model.shape) == 2:
y_model = y_model[:,1]
self._fit_binary(y_model, y_true, verbose=verbose)
def _fit_binary(self, y_model, y_true, verbose=False):
if verbose:
print("Determining Calibration Function")
# Determine the knots to use (on probability scale)
self.knot_vec = self._get_knot_vec(y_model)
# Augment data with unity prior (if necessary)
self.use_weights = False
if self.unity_prior:
scorevec_coda, truthvec_coda = create_yeqx_bias_vectors(self.unity_prior_gridsize)
self.use_weights = True
coda_wt = self.unity_prior_weight/((self.unity_prior_gridsize+1)*(self.unity_prior_gridsize))
weightvec = np.concatenate((np.ones(len(y_model)), coda_wt * np.ones(len(scorevec_coda))))
y_model = np.concatenate((y_model, scorevec_coda))
y_true = np.concatenate((y_true, truthvec_coda))
# map data and knots to log odds scale if necessary
if self.logodds_scale:
if self.logodds_eps_auto:
self._compute_logodds_eps_from_data(y_model)
self.knot_vec_tr = np.minimum(1-self.logodds_eps, self.knot_vec)
self.knot_vec_tr = np.maximum(self.logodds_eps, self.knot_vec_tr)
self.knot_vec_tr = np.unique(self.knot_vec_tr)
self.knot_vec_tr = np.log(self.knot_vec_tr/(1-self.knot_vec_tr))
y_model_tr = np.minimum(1-self.logodds_eps, y_model)
y_model_tr = np.maximum(self.logodds_eps, y_model_tr)
y_model_tr = np.log(y_model_tr/(1-y_model_tr))
else:
y_model_tr = y_model
self.knot_vec_tr = self.knot_vec
# compute basis expansion
X_mat = _natural_cubic_spline_basis_expansion(y_model_tr, self.knot_vec_tr)
# perform cross-validated logistic regression
if self.use_weights:
best_C, ll_vec, reg = logreg_cv(X_mat, y_true, self.cv_spline,
self.reg_param_vec,
weightvec=weightvec,
penalty=self.penalty,
solver=self.solver,
max_iter=self.max_iter,
tol=self.tol,
reg_prec=self.reg_prec)
else:
best_C, ll_vec, reg = logreg_cv(X_mat, y_true, self.cv_spline,
self.reg_param_vec,
penalty=self.penalty,
solver=self.solver,
max_iter=self.max_iter,
tol=self.tol,
reg_prec=self.reg_prec)
self.best_reg_param = best_C
self.reg_param_scores = ll_vec
self.basis_coef_vec = reg.coef_
self.lrobj = reg
def _fit_multiclass(self, y_model, y_true, verbose=False):
self.binary_splinecalibs = []
for i in range(self.n_classes):
if verbose:
print('Calibrating Class {}'.format(i))
le_auto = 'auto' if self.logodds_eps_auto else self.logodds_eps
self.binary_splinecalibs.append(SplineCalib(
penalty=self.penalty,
knot_sample_size = self.knot_sample_size,
add_knots = self.add_knots,
reg_param_vec = self.reg_param_vec,
cv_spline=self.cv_spline,
random_state=self.random_state,
max_iter=self.max_iter,
tol=self.tol,
solver=self.solver,
logodds_scale=self.logodds_scale,
logodds_eps=le_auto,
unity_prior = self.unity_prior,
unity_prior_weight = self.unity_prior_weight,
unity_prior_gridsize = self.unity_prior_gridsize))
self.binary_splinecalibs[i].fit(y_model[:,i],
(y_true==i).astype(int))
def _get_knot_vec(self, y_model):
"""Routine to choose the set of knots."""
random.seed(self.random_state)
unique_vals = np.unique(y_model)
num_unique = len(unique_vals)
if(num_unique<3):
raise Exception('Less than 3 unique input values.')
if (self.knot_sample_size==0):
if (self.add_knots is None):
raise Exception('Must have knot_sample_size>0 or specify add_knots')
else:
return(np.unique(self.add_knots))
if (self.knot_sample_size<2) and (self.force_knot_endpts):
warn_msg = ('force_knot_endpts is True but knot_sample_size<2.\n' +
'Changing force_knot_endpts to False.')
warnings.warn(warn_msg)
self.force_knot_endpts=False
# Usual case: more unique values than the sample being chosen
if (num_unique > self.knot_sample_size):
if self.force_knot_endpts:
smallest_knot, biggest_knot = unique_vals[0], unique_vals[-1]
other_vals = unique_vals[1:-1]
random.shuffle(other_vals)
curr_knot_vec = other_vals[:(self.knot_sample_size-2)]
curr_knot_vec = np.concatenate((curr_knot_vec,
[smallest_knot, biggest_knot]))
else:
random.shuffle(unique_vals)
curr_knot_vec = unique_vals[:self.knot_sample_size]
# use all the unique_vals
else:
curr_knot_vec = unique_vals
# Add in the additional knots
if self.add_knots is not None:
add_knot_vec = np.array(self.add_knots)
curr_knot_vec = np.concatenate((curr_knot_vec,add_knot_vec))
# Sort and remove duplicates
return(np.unique(curr_knot_vec))
def calibrate(self, y_in):
"""Calibrates a set of predictions after being fit.
This function returns calibrated probabilities after being
fit on a set of predictions and their true answers. It handles
either binary and multiclass problems, depending on how it was fit.
Parameters
----------
y_in : array-like, shape (n_samples, n_features)
The pre_calibrated scores. For binary classification
can pass in a 1-d array representing the probability
of class 1.
Returns
-------
y_out : array, shape (n_samples, n_classes)
The calibrated probabilities: y_out will be returned
in the same shape as y_in.
"""
if self.n_classes>2:
return(self._calibrate_multiclass(y_in))
elif self.n_classes==2:
return(self._calibrate_binary(y_in))
else:
warnings.warn('SplineCalib not fit or only one class found')
def _calibrate_binary(self, y_in):
if (len(y_in.shape)==2):
if (y_in.shape[1]==2):
y_in_to_use = y_in[:,1]
two_col = True
elif (y_in.shape[0]==1):
y_in_to_use = y_in[:,0]
two_col=False
elif (len(y_in.shape)==1):
y_in_to_use = y_in
two_col = False
else:
warnings.warn('Unable to handle input of this shape')
if self.logodds_scale:
y_in_to_use = np.minimum(1-self.logodds_eps, y_in_to_use)
y_in_to_use = np.maximum(self.logodds_eps, y_in_to_use)
y_model_tr = np.log(y_in_to_use/(1-y_in_to_use))
else:
y_model_tr = y_in_to_use
basis_exp = _natural_cubic_spline_basis_expansion(y_model_tr,self.knot_vec_tr)
y_out = basis_exp.dot(self.basis_coef_vec.T)
y_out = 1/(1+np.exp(-y_out))
if (len(y_out.shape)>1):
y_out = y_out[:,0]
y_out = np.vstack((1-y_out, y_out)).T if two_col else y_out
return y_out
def _calibrate_multiclass(self, y_in):
y_out = -1*np.ones(y_in.shape)
for i in range(self.n_classes):
y_out[:,i] = self.binary_splinecalibs[i].calibrate(y_in[:,i])
y_out = (y_out.T/(np.sum(y_out, axis=1))).T
return y_out
def show_spline_reg_plot(self, class_num=None):
"""Plots the cross-val loss against the regularization parameter.
This is a diagnostic tool, for example, to indicate whether or
not the search over regularization parameter values was wide
enough or dense enough.
For multiclass calibration, the class number must be given."""
if self.n_classes == 2:
plt.plot(np.log10(self.reg_param_vec),self.reg_param_scores, marker='x')
plt.xlabel('Regularization Parameter (log 10 scale)')
plt.ylabel('Average Log Loss across folds')
plt.axvline(np.log10(self.best_reg_param),0,1,color='black',linestyle='--')
else:
if class_num is None:
warnings.warn('Must specify a class number for Multiclass Calibration')
else:
obj=self.binary_splinecalibs[class_num]
plt.plot(np.log10(obj.reg_param_vec),obj.reg_param_scores, marker='x')
plt.xlabel('Regularization Parameter (log 10 scale)')
plt.ylabel('Average Log Loss across folds')
plt.axvline(np.log10(obj.best_reg_param),0,1,color='black',linestyle='--')
def show_calibration_curve(self, class_num=None, resolution=.001,
show_baseline=True):
"""Plots the calibration curve as a function from [0,1] to [0,1]
Parameters
----------
resolution: The space between the plotted points on the x-axis.
"""
tvec = np.linspace(0,1,int(np.ceil(1/resolution))+1)
avec = np.logspace(-16,-3,6)
bvec = 1-avec
tvec = np.unique(np.concatenate((tvec,avec,bvec)))
if self.n_classes == 2:
plt.plot(tvec, self.calibrate(tvec))
if show_baseline:
plt.plot(tvec,tvec,'k--')
plt.axis([-0.1,1.1,-0.1,1.1])
plt.xlabel('Uncalibrated')
plt.ylabel('Calibrated')
elif class_num is None:
warnings.warn('Must specify a class number for Multiclass Calibration')
else:
self.binary_splinecalibs[class_num].show_calibration_curve(
resolution=resolution,
show_baseline=show_baseline)
def transform(self, y_in):
"""alias for calibrate"""
return self.calibrate(y_in)
def predict(self, y_in):
"""alias for calibrate"""
return self.calibrate(y_in)
def predict_proba(self, y_in):
"""alias for calibrate"""
return self.calibrate(y_in)
def _compute_logodds_eps_from_data(self, y_model):
"""Routine to automatically choose the logodds_eps value"""
y_model_loe = y_model[(y_model>0) & (y_model<1)]
closest_to_zero = np.min(y_model_loe)
closest_to_one = 1-np.min(1-y_model_loe)
loe_exp = np.round(np.log10(np.min((closest_to_zero,
closest_to_one))))-1
self.logodds_eps = np.minimum(self.logodds_eps, 10**loe_exp)
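# ---------------------------------------------------------------------------
# Usage sketch (illustration only, not part of the original module): fit a
# binary SplineCalib on synthetic, deliberately miscalibrated scores and apply
# it to the same scores. Parameter choices here are arbitrary assumptions.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    p_true = rng.uniform(size=2000)                    # latent true probabilities
    y_true = (rng.uniform(size=2000) < p_true).astype(int)
    y_model = p_true ** 2                              # squashed, miscalibrated scores
    sc = SplineCalib(knot_sample_size=20, cv_spline=3)
    sc.fit(y_model, y_true)
    p_cal = sc.calibrate(y_model)                      # calibrated probabilities
    print(p_cal[:5])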
| mit |
khkaminska/scikit-learn | sklearn/linear_model/setup.py | 146 | 1713 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linear_model', parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension('cd_fast', sources=['cd_fast.c'],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]), **blas_info)
config.add_extension('sgd_fast',
sources=['sgd_fast.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
config.add_extension('sag_fast',
sources=['sag_fast.c'],
include_dirs=numpy.get_include())
# add other directories
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
MycChiu/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/dataframe_test.py | 62 | 3753 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the DataFrame class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
def setup_test_df():
"""Create a dataframe populated with some test columns."""
df = learn.DataFrame()
df["a"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor a", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
df["b"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor b", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out2")
df["c"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor c", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
return df
class DataFrameTest(test.TestCase):
"""Test of `DataFrame`."""
def test_create(self):
df = setup_test_df()
self.assertEqual(df.columns(), frozenset(["a", "b", "c"]))
def test_select_columns(self):
df = setup_test_df()
df2 = df.select_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["a", "c"]))
def test_exclude_columns(self):
df = setup_test_df()
df2 = df.exclude_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["b"]))
def test_get_item(self):
df = setup_test_df()
c1 = df["b"]
self.assertEqual(
mocks.MockTensor("Mock Tensor 2", dtypes.int32), c1.build())
def test_del_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
del df["b"]
self.assertEqual(2, len(df))
self.assertEqual(df.columns(), frozenset(["a", "c"]))
def test_set_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn",
mocks.MockTensor("Tensor ", dtypes.int32))
df["quack"] = col1
self.assertEqual(4, len(df))
col2 = df["quack"]
self.assertEqual(col1, col2)
def test_set_item_column_multi(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn", [])
col2 = mocks.MockSeries("MooColumn", [])
df["quack", "moo"] = [col1, col2]
self.assertEqual(5, len(df))
col3 = df["quack"]
self.assertEqual(col1, col3)
col4 = df["moo"]
self.assertEqual(col2, col4)
def test_set_item_pandas(self):
# TODO(jamieas)
pass
def test_set_item_numpy(self):
# TODO(jamieas)
pass
def test_build(self):
df = setup_test_df()
result = df.build()
expected = {
"a": mocks.MockTensor("Mock Tensor 1", dtypes.int32),
"b": mocks.MockTensor("Mock Tensor 2", dtypes.int32),
"c": mocks.MockTensor("Mock Tensor 1", dtypes.int32)
}
self.assertEqual(expected, result)
if __name__ == "__main__":
test.main()
| apache-2.0 |
johnmgregoire/JCAPdatavis | echem_stacked_tern_batch.py | 1 | 6770 | import matplotlib.cm as cm
import numpy
import pylab
import h5py, operator, copy, os, csv, sys
from echem_plate_fcns import *
PyCodePath=os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
sys.path.append(os.path.join(PyCodePath,'ternaryplot'))
from myternaryutility import TernaryPlot
from myquaternaryutility import QuaternaryPlot
from quaternary_FOM_stackedtern import *
#os.chdir(cwd)
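# Batch plotting script: reads per-plate figure-of-merit (FOM) files for a
# Ni-Fe-Co-Ti composition-spread library, then plots the FOM versus wafer
# (x, y) position, on the quaternary composition simplex, on stacked ternary
# slices, and for the pure elements versus plate number, saving each figure.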
def myeval(c):
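    # Parse a text field from the FOM file: the literal 'None' maps to None;
    # otherwise leading zeros are stripped before eval (so e.g. '007' is read
    # as decimal rather than Python 2 octal), with all-zero fields such as '0'
    # or '00' mapping to 0.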
if c=='None':
c=None
else:
temp=c.lstrip('0')
if (temp=='' or temp=='.') and '0' in c:
c=0
else:
c=eval(temp)
return c
ellabels=['Fe', 'Co', 'Ni', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop')
rootstr='20120728NiFeCoTiplate'
#expstr='CV2V_Ithresh'
#fomlabel='Potential for 0.1mA (V vs H$_2$0/O$_2$)'
#fomshift=-.2
#vmin=.3
#vmax=.6
#expstr='CP1Ess'
#fomlabel='Potential for 0.02mA (V vs H$_2$0/O$_2$)'
#fomshift=-.2
#vmin=.21
#vmax=.45
#cmap=cm.jet_r
#aboverangecolstr='k'
#belowrangecolstr=''
expstr='CV2Imax'
fomlabel='max I in CV (A)'
fomshift=0
vmin=.21
vmax=.45
cmap=cm.jet
aboverangecolstr='.4'
belowrangecolstr='k'
dpl=['', '', '']
for root, dirs, files in os.walk(os.getcwd()):
testfn=[fn for fn in files if (rootstr in fn) and (expstr in fn)]
for fn in testfn:
for count in range(3):
if ('plate%d' %(count+1)) in fn:
dpl[count]=os.path.join(root, fn)
print 'FOM file paths:'
for dp in dpl:
print dp
savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20120728NiFeCoTi_allplateresults'
dropdl=[]
for dp in dpl:
f=open(dp, mode='r')
dr=csv.DictReader(f, delimiter='\t')
dropd={}
for l in dr:
for kr in l.keys():
k=kr.strip()
if not k in dropd.keys():
dropd[k]=[]
dropd[k]+=[myeval(l[kr].strip())]
for k in dropd.keys():
dropd[k]=numpy.array(dropd[k])
f.close()
dropdl+=[dropd]
pointsize=20
opacity=.6
view_azim=-159
view_elev=30
fig=pylab.figure(figsize=(9, 4.*len(dropdl)))
figquatall=[]
compsall=[]
fomall=[]
plateindall=[]
for count, dropd in enumerate(dropdl):
#dropinds=numpy.arange(len(dropd['Sample']))
dropinds=numpy.argsort(dropd['Sample'])
x=dropd['x(mm)'][dropinds]
y=dropd['y(mm)'][dropinds]
fom=dropd[expstr][dropinds]+fomshift
clip=True
for fcn, vstr in zip([cmap.set_over, cmap.set_under], [aboverangecolstr, belowrangecolstr]):
if len(vstr)==0:
continue
c=col_string(vstr)
fcn(c)
clip=False
norm=colors.Normalize(vmin=vmin, vmax=vmax, clip=clip)
print 'fom min, max, mean, std:', fom.min(), fom.max(), fom.mean(), fom.std()
if numpy.any(fom>vmax):
if numpy.any(fom<vmin):
extend='both'
else:
extend='max'
elif numpy.any(fom<vmin):
extend='min'
else:
extend='neither'
#ax=pylab.subplot(211)
#ax2=pylab.subplot(212)
#pylab.subplots_adjust(left=.03, right=.97, top=.97, bottom=.03, hspace=.01)
ax2=fig.add_subplot(len(dropdl), 1, count+1)
ax2.set_aspect(1)
mapbl=ax2.scatter(x, y, c=fom, s=60, marker='s', edgecolors='none', cmap=cmap, norm=norm)
ax2.set_xlim(x.min()-2, x.max()+2)
ax2.set_ylim(y.min()-2, y.max()+2)
ax2.set_title('plate %d' %(count+1))
#pylab.title('CP1Ess (V) Map')
comp=numpy.array([[dropd['A'][i], dropd['B'][i], dropd['C'][i], dropd['D'][i]] for i in dropinds])
comp=numpy.array([a/a.sum() for a in comp])
figquat=pylab.figure(figsize=(8, 8))
stp = QuaternaryPlot(111, minlist=[0., 0., 0., 0.], ellabels=ellabels)
stp.scatter(comp, c=fom, s=pointsize, edgecolors='none', cmap=cmap, norm=norm)
stp.label(ha='center', va='center', fontsize=16)
stp.set_projection(azim=view_azim, elev=view_elev)
caxquat=figquat.add_axes((.83, .3, .04, .4))
cb=pylab.colorbar(stp.mappable, cax=caxquat, extend=extend)
cb.set_label(fomlabel, fontsize=16)
stp.ax.set_title('plate %d' %(count+1))
compsall+=list(comp)
fomall+=list(fom)
figquatall+=[figquat]
plateindall+=[count]*len(fom)
fig.subplots_adjust(left=.05, bottom=.03, top=.96, right=.83, hspace=.14)
cax=fig.add_axes((.85, .3, .04, .4))
cb=pylab.colorbar(mapbl, cax=cax, extend=extend)
cb.set_label(fomlabel, fontsize=20)
compsall=numpy.array(compsall)
fomall=numpy.array(fomall)
plateindall=numpy.array(plateindall)
axl, stpl=make10ternaxes(ellabels=ellabels)
pylab.figure(figsize=(8, 8))
stpquat=QuaternaryPlot(111, ellabels=ellabels)
stpquat.scatter(compsall, c=fomall, s=20, edgecolors='none', cmap=cmap, norm=norm)
scatter_10axes(compsall, fomall, stpl, s=20, edgecolors='none', cmap=cmap, norm=norm)
stpquat.label()
stpquat.set_projection(azim=view_azim, elev=view_elev)
figtemp=pylab.figure(stpquat.ax.figure.number)
cax10=figtemp.add_axes((.83, .3, .04, .4))
cb=pylab.colorbar(stpquat.mappable, cax=cax10, extend=extend)
cb.set_label(fomlabel, fontsize=16)
figtemp=pylab.figure(axl[0].figure.number)
cax10=figtemp.add_axes((.85, .3, .04, .4))
cb=pylab.colorbar(stpquat.mappable, cax=cax10, extend=extend)
cb.set_label(fomlabel, fontsize=16)
purelfig=pylab.figure()
linestyle=['-', '--', '-.', ':']
for count, col in enumerate(['c', 'm', 'y', 'k']):
c_el=compsall[:, count]
inds=numpy.where(c_el>.99)
fom_el=fomall[inds]
plate_inds=plateindall[inds]
fom_plate_thick=numpy.array([fom_el[plate_inds==i][-4:] for i in range(3)])
for thickcount, (fom_plate, ls) in enumerate(zip(fom_plate_thick.T, linestyle)):
if count==3 or thickcount==0:
pylab.plot([1, 2, 3], fom_plate, col+ls, marker=r'$%d$' %(thickcount+1),markersize=13, label='%s,thick. %d' %(ellabels[count], thickcount+1))
# elif thickcount==0:
# pylab.plot([1, 2, 3], fom_plate, col+ls, marker=r'$%d$' %(thickcount+1),markersize=13, label='%s' %(ellabels[count],) )
else:
pylab.plot([1, 2, 3], fom_plate, col+ls, marker=r'$%d$' %(thickcount+1),markersize=13)
pylab.xlim(.7, 4.3)
pylab.xlabel('plate number')
pylab.ylabel('fom')
pylab.title('PURE ELEMENTS. color=element(CMYK). #=thickness')
pylab.legend(loc=1)
if 1:
os.chdir(savefolder)
pylab.figure(fig.number)
pylab.savefig('%s_PlatesAll_Posn.png' %expstr)
for count, fg in enumerate(figquatall):
pylab.figure(fg.number)
pylab.savefig('%s_Plate%d_Quat.png' %(expstr, count+1))
pylab.figure(stpquat.ax.figure.number)
pylab.savefig('%s_PlatesAll_Quat.png' %expstr)
pylab.figure(axl[0].figure.number)
pylab.savefig('%s_stackedtern.png' %expstr)
pylab.figure(purelfig.number)
pylab.savefig('%s_pureelements.png' %expstr)
pylab.show()
| bsd-3-clause |
henrymliu/Dynamical_Billiards | Buminovich.py | 1 | 10422 | from matplotlib import pyplot as plt
from matplotlib import animation
import matplotlib.patches as mpatches
import matplotlib.axes as axes
import numpy as np
from scipy import optimize as op
from AbstractTable import AbstractTable as abT
from matplotlib.collections import PatchCollection
class Buminovich(abT):
"""docstring for Buminovich."""
def __init__(self, **kwargs):
super(Buminovich,self).__init__(**kwargs)
self.length = 2
self.height = 2
self.minlinex = 0
self.minliney = 0
self.radius = self.height / 2
def drawTable(self, ec='none'):
"""
makes a fig and axes and adds the table as a collection of patches.
ec should be set to 'k' when generating a preview
"""
self.fig, self.ax = plt.subplots(figsize=(10, 10))
self.fig.canvas.set_window_title('Buminovich Stadium Billiards Simulation')
self.ax.set(xlim=[-(self.minlinex + self.radius + 0.5),(self.length + self.radius + 0.5)],
ylim=[-(self.radius + 0.5), self.radius + 0.5])
self.ax.axis('off')
# make empty patches list
patches=[]
# define both side arcs as patches and add them to the patch list
patches.append(mpatches.Arc((0, 0), self.height, self.height, 0, 90, 270))
patches.append(mpatches.Arc((self.length, 0), self.height, self.height,
0, 270, 450))
# define both lines as patches and add them to patches list
patches.append(plt.Polygon(np.array([[0, self.radius],
[self.length,self.radius]])))
patches.append(plt.Polygon(np.array([[0, -self.radius],
[self.length,-self.radius]])))
# make the collection from the list
self.table = PatchCollection(patches)
# set table edge parameters since the collection resets them all
self.table.set_edgecolor(ec)
self.table.set_linewidth(1)
self.table.set_facecolor('none')
# put collection on figure
self.ax.add_collection(self.table)
plt.axis('equal')
    def step(self, particle, dt):
        """
        checks for collisions and updates velocities accordingly for one
        particle
"""
if particle.state[0] >= self.minlinex and particle.state[0] <= self.length:
crossed_ymax = particle.state[1] > self.radius
crossed_ymin = particle.state[1] < -self.radius
if crossed_ymin:
if particle.state[2] != 0:
fun = lambda x: particle.state[3]/particle.state[2]* \
(x-particle.state[0])+particle.state[1] + self.radius
root=op.brentq(fun, self.minlinex - 0.1, self.length + 0.1)
particle.state[0]=root
particle.state[1]=-self.radius
particle.state[3]*=-1
if crossed_ymax:
if particle.state[2] != 0:
fun = lambda x: particle.state[3]/particle.state[2]* \
(x-particle.state[0])+particle.state[1]-self.radius
root = op.brentq(fun, self.minlinex - 0.1, self.length + 0.1)
particle.state[0]=root
particle.state[1]=self.radius
particle.state[3]*=-1
elif particle.state[0] < self.minlinex:
if np.hypot(particle.state[0], particle.state[1]) > self.radius:
if particle.state[2] != 0 and abs(particle.state[3] / particle.state[2]) <= 1:
m = particle.state[3] / particle.state[2]
intercept = m * -particle.state[0] + particle.state[1]
# discrimanant
b = 2 * m * intercept
a = m ** 2 + 1
c = -self.radius ** 2 + intercept ** 2
# choose root based on proximity with x
root = -b / (2 * a)
dis = (np.sqrt(abs(b ** 2 - 4 * a * c))) / (2 * a)
if abs(particle.state[0] - root - dis) <\
abs(particle.state[0] - root + dis):
root += dis
else:
root -= dis
particle.state[0] = root
particle.state[1] = m * root + intercept
# print((particle.state[0], particle.state[1]))
vel_norm = np.hypot(particle.state[0], particle.state[1])
dot = (particle.state[2] * particle.state[0] +\
particle.state[3] * particle.state[1]) / (vel_norm ** 2)
particle.state[2] = particle.state[2] - 2 * dot *\
particle.state[0]
particle.state[3] = particle.state[3] - 2 * dot *\
particle.state[1]
# particle.state[3] *= -1/2;
# particle.state[2] *= -1;
# print((particle.state[2], particle.state[3]))
#print((particle.state[0], particle.state[1]))
elif abs(particle.state[2] / particle.state[3]) <= 1:
m = particle.state[2] / particle.state[3]
intercept = m * -particle.state[1] + particle.state[0]
# discriminant
b = 2 * m * intercept
a = m ** 2 + 1
c = -self.radius ** 2 + intercept ** 2
# choose root based on proximity with current y
root = -b / (2 * a)
dis = (np.sqrt(abs(b ** 2 - 4 * a * c))) / (2 * a)
if abs(particle.state[1] - root - dis) <\
abs(particle.state[1] - root + dis):
root += dis
else:
root -= dis
# vel = [particle.state[0], particle.state[1]] / vel_norm
particle.state[0] = m * root + intercept
particle.state[1] = root
# print((particle.state[0], particle.state[1]))
# update velocity based on r = d - 2(r.n)r
vel_norm = np.hypot(particle.state[0], particle.state[1])
dot = (particle.state[2] * particle.state[0] +\
particle.state[3] * particle.state[1]) / (vel_norm ** 2)
particle.state[2] = particle.state[2] - 2 * dot *\
particle.state[0]
particle.state[3] = particle.state[3] - 2 * dot *\
particle.state[1]
# print((particle.state[2], particle.state[3]))
#print((particle.state[0], particle.state[1]))
elif particle.state[0] > self.length:
if np.hypot(particle.state[0] - self.length, particle.state[1]) > self.radius:
#shift table so that centre of right circle is origin
particle.state[0] = particle.state[0] - self.length
if particle.state[2] != 0 and abs(particle.state[3] / particle.state[2]) <= 1:
m = particle.state[3] / particle.state[2]
intercept = m * -particle.state[0] + particle.state[1]
# discrimanant
b = 2 * m * intercept
a = m ** 2 + 1
c = -self.radius ** 2 + intercept ** 2
# choose root based on proximity with x
root = -b / (2 * a)
dis = (np.sqrt(abs(b ** 2 - 4 * a * c))) / (2 * a)
if abs(particle.state[0] - root - dis) <\
abs(particle.state[0] - root + dis):
root += dis
else:
root -= dis
particle.state[0] = root
particle.state[1] = m * root + intercept
# print((particle.state[0], particle.state[1]))
vel_norm = np.hypot(particle.state[0], particle.state[1])
dot = (particle.state[2] * particle.state[0] + particle.state[3] * particle.state[1]) / (vel_norm ** 2)
particle.state[2] = particle.state[2] - 2 * dot * particle.state[0]
particle.state[3] = particle.state[3] - 2 * dot * particle.state[1]
# particle.state[3] *= -1/2;
# particle.state[2] *= -1;
# print((particle.state[2], particle.state[3]))
#print((particle.state[0], particle.state[1]))
elif abs(particle.state[2] / particle.state[3]) <= 1:
m = particle.state[2] / particle.state[3]
intercept = m * -particle.state[1] + particle.state[0]
# discriminant
b = 2 * m * intercept
a = m ** 2 + 1
c = -self.radius ** 2 + intercept ** 2
# choose root based on proximity with current y
root = -b / (2 * a)
dis = (np.sqrt(abs(b ** 2 - 4 * a * c))) / (2 * a)
if abs(particle.state[1] - root - dis) < abs(particle.state[1] - root + dis):
root += dis
else:
root -= dis
# vel = [particle.state[0], particle.state[1]] / vel_norm
particle.state[0] = m * root + intercept
particle.state[1] = root
# print((particle.state[0], particle.state[1]))
# update velocity based on r = d - 2(r.n)r
vel_norm = np.hypot(particle.state[0], particle.state[1])
dot = (particle.state[2] * particle.state[0] + particle.state[3] * particle.state[1]) / (vel_norm ** 2)
particle.state[2] = particle.state[2] - 2 * dot * particle.state[0]
particle.state[3] = particle.state[3] - 2 * dot * particle.state[1]
# print((particle.state[2], particle.state[3]))
#print((particle.state[0], particle.state[1]))
#reset table to original origin
particle.state[0] = particle.state[0] + self.length
if __name__ == '__main__':
table = Buminovich()
table.main()
| mit |
3D-e-Chem/python-modified-tanimoto | tests/test_frozen.py | 2 | 9637 | # Copyright 2016 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import pytest
import numpy as np
from numpy.testing import assert_array_almost_equal
import pandas as pd
import pandas.util.testing as pdt
from .utils import FrozenSimilarityMatrixInMemory, SimilarityMatrixInMemory
@pytest.fixture
def similarity_matrix():
pair_matrix_inmem = SimilarityMatrixInMemory()
pair_matrix = pair_matrix_inmem.matrix
labels = {'a': 0, 'b': 1, 'c': 2, 'd': 3}
similarities = [
('a', 'b', 0.9),
('a', 'c', 0.5),
('b', 'c', 0.6),
('d', 'c', 0.7)
]
pair_matrix.update(similarities, labels)
yield pair_matrix
pair_matrix_inmem.close()
@pytest.fixture
def frozen_similarity_matrix():
matrix_inmem = FrozenSimilarityMatrixInMemory()
matrix = matrix_inmem.matrix
yield matrix
matrix_inmem.close()
def fillit(frozen_similarity_matrix):
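    """Populate the frozen matrix with a small symmetric 4x4 example."""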
labels = ['a', 'b', 'c', 'd']
data = [
[0.0, 0.9, 0.5, 0.0],
[0.9, 0.0, 0.6, 0.0],
[0.5, 0.6, 0.0, 0.7],
[0.0, 0.0, 0.7, 0.0],
]
frozen_similarity_matrix.from_array(np.array(data), labels)
class TestFrozenSimilarityMatrix(object):
def test_from_pairs_defaults(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 10)
result = frozen_similarity_matrix.to_pandas()
labels = ['a', 'b', 'c', 'd']
expected = pd.DataFrame([
[0.0, 0.9, 0.5, 0.0],
[0.9, 0.0, 0.6, 0.0],
[0.5, 0.6, 0.0, 0.7],
[0.0, 0.0, 0.7, 0.0]
], index=labels, columns=labels)
pdt.assert_almost_equal(result, expected)
def test_from_pairs_multiframe(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 1, None, False)
result = frozen_similarity_matrix.to_pandas()
labels = ['a', 'b', 'c', 'd']
expected = pd.DataFrame([
[0.0, 0.9, 0.5, 0.0],
[0.9, 0.0, 0.6, 0.0],
[0.5, 0.6, 0.0, 0.7],
[0.0, 0.0, 0.7, 0.0]
], index=labels, columns=labels)
pdt.assert_almost_equal(result, expected)
def test_from_pairs_limited(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 1, 1, False)
result = frozen_similarity_matrix.to_pandas()
labels = ['a', 'b', 'c', 'd']
expected = pd.DataFrame([
[0.0, 0.9, 0.0, 0.0],
[0.9, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0]
], index=labels, columns=labels)
pdt.assert_almost_equal(result, expected)
def test_from_pairs_singlesided(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 10, None, True)
result = frozen_similarity_matrix.to_pandas()
print(result)
labels = ['a', 'b', 'c', 'd']
expected = pd.DataFrame([
[0.0, 0.9, 0.5, 0.0],
[0.0, 0.0, 0.6, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.7, 0.0]
], index=labels, columns=labels)
pdt.assert_almost_equal(result, expected)
def test_find_defaults(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 10)
hits = frozen_similarity_matrix.find('c', 0.55)
expected = [('d', 0.7), ('b', 0.6)]
assert hits == expected
def test_find_limit(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 10)
        hits = frozen_similarity_matrix.find('c', 0.55, 1)
expected = [('d', 0.7)]
assert hits == expected
def test_find_cutoffhigh_nohits(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 10)
hits = frozen_similarity_matrix.find('c', 0.9)
expected = []
assert hits == expected
def test_find_badkey_keyerror(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 10)
with pytest.raises(KeyError):
frozen_similarity_matrix.find('f', 0.45)
def test_find_singlesided(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 10, None, True)
print(frozen_similarity_matrix.scores.read())
hits = frozen_similarity_matrix.find('c', 0.0)
expected = []
assert hits == expected
def test_from_array(self, similarity_matrix, frozen_similarity_matrix):
labels = ['a', 'b', 'c', 'd']
data = [
[0.0, 0.9, 0.5, 0.0],
[0.9, 0.0, 0.6, 0.0],
[0.5, 0.6, 0.0, 0.7],
[0.0, 0.0, 0.7, 0.0],
]
frozen_similarity_matrix.from_array(np.array(data), labels)
result = frozen_similarity_matrix.to_pandas()
expected = pd.DataFrame(data, index=labels, columns=labels)
pdt.assert_almost_equal(result, expected)
def test_getitem_row(self, similarity_matrix, frozen_similarity_matrix):
labels = ['a', 'b', 'c', 'd']
data = [
[0.0, 0.9, 0.5, 0.0],
[0.9, 0.0, 0.6, 0.0],
[0.5, 0.6, 0.0, 0.7],
[0.0, 0.0, 0.7, 0.0],
]
frozen_similarity_matrix.from_array(np.array(data), labels)
result = []
for label in labels:
result.append(frozen_similarity_matrix[label])
expected = [
[(u'b', 0.9), (u'c', 0.5), (u'd', 0.0)],
[(u'a', 0.9), (u'c', 0.6), (u'd', 0.0)],
[(u'a', 0.5), (u'b', 0.6), (u'd', 0.7)],
[(u'a', 0.0), (u'b', 0.0), (u'c', 0.7)],
]
assert result == expected
@pytest.mark.parametrize('frag1,frag2,expected', (
('a', 'a', 1.0),
('a', 'b', 0.9),
('a', 'c', 0.5),
('b', 'c', 0.6),
('d', 'c', 0.7),
))
def test_getitem_cell(self, frozen_similarity_matrix, frag1, frag2, expected):
fillit(frozen_similarity_matrix)
score = frozen_similarity_matrix[frag1, frag2]
assert score == expected
def test__iter__(self, frozen_similarity_matrix):
fillit(frozen_similarity_matrix)
result = set(frozen_similarity_matrix)
expected = {
('a', 'b', 0.9),
('a', 'c', 0.5),
('b', 'c', 0.6),
('c', 'd', 0.7),
}
assert result == expected
def test_getitem_row_unknownfrag_keyerror(self, frozen_similarity_matrix):
fillit(frozen_similarity_matrix)
with pytest.raises(KeyError):
frozen_similarity_matrix['z']
def test_getitem_cell_unknownfrag_keyerror(self, frozen_similarity_matrix):
fillit(frozen_similarity_matrix)
with pytest.raises(KeyError):
frozen_similarity_matrix['z', 'z']
def test_to_pairs(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 10)
with SimilarityMatrixInMemory() as thawed_matrix:
frozen_similarity_matrix.to_pairs(thawed_matrix)
# compare scores
assert_array_almost_equal(
[d[2] for d in thawed_matrix],
[d[2] for d in similarity_matrix],
5
)
def test_count(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 10)
counts = list(frozen_similarity_matrix.count())
expected = [(0.5, 1),
(0.6, 1),
(0.7, 1),
(0.9, 1)]
assert_array_almost_equal(counts, expected, 6)
def test_count_lower_triangle(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 10)
counts = list(frozen_similarity_matrix.count(lower_triangle=True))
expected = [(0.5, 1),
(0.6, 1),
(0.7, 1),
(0.9, 1)]
assert_array_almost_equal(counts, expected, 6)
def test_count_cmp_triangle(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 10)
upper_triangle = list(frozen_similarity_matrix.count(lower_triangle=False))
lower_triangle = list(frozen_similarity_matrix.count(lower_triangle=True))
assert_array_almost_equal(upper_triangle, lower_triangle, 6)
def test_count_raw_score(self, similarity_matrix, frozen_similarity_matrix):
frozen_similarity_matrix.from_pairs(similarity_matrix, 10)
counts = list(frozen_similarity_matrix.count(raw_score=True))
expected = [(32767, 1),
(39321, 1),
(45874, 1),
(58981, 1)]
assert_array_almost_equal(counts, expected, 6)
| apache-2.0 |
schets/scikit-learn | sklearn/ensemble/partial_dependence.py | 6 | 14973 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
        All data points on the grid; ``grid.shape[1] == X.shape[1]``
        and ``grid.shape[0] == grid_resolution ** X.shape[1]`` when every
        column has at least ``grid_resolution`` unique values.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
            # feature has low resolution; use its unique values
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
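# Illustrative sketch (not part of the original module): when every column of
# the input has more than ``grid_resolution`` unique values, the returned grid
# is the cartesian product of one linspace axis per column. The demo array is
# made up for illustration.
#
#   >>> import numpy as np
#   >>> X_demo = np.array([[0., 10.], [1., 20.], [2., 30.], [3., 40.]])
#   >>> grid, axes = _grid_from_X(X_demo, percentiles=(0.05, 0.95),
#   ...                           grid_resolution=3)
#   >>> grid.shape    # 3 points per column -> 3 * 3 rows, 2 columns
#   (9, 2)
#   >>> len(axes)
#   2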
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
        # don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
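# Two-way sketch (illustrative; reuses the doctest objects from the docstring
# above, with an arbitrary second target feature):
#
#   >>> pdp, axes = partial_dependence(gb, [0, 2], **kwargs)  # doctest: +SKIP
#   >>> pdp.shape    # one output class, 2 x 2 grid points
#   (1, 4)
#   >>> [a.shape for a in axes]
#   [(2,), (2,)]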
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
        Dict with keywords passed to the ``pylab.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
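# Usage sketch (illustrative only; it mirrors the doctest in the docstring
# above, adding a second one-way panel and a two-way contour panel):
#
#   from sklearn.datasets import make_friedman1
#   from sklearn.ensemble import GradientBoostingRegressor
#   X, y = make_friedman1()
#   clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
#   fig, axs = plot_partial_dependence(clf, X, [0, 1, (0, 1)],
#                                      grid_resolution=50)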
| bsd-3-clause |
Nyker510/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 155 | 8058 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed, [])
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
| bsd-3-clause |
Parallel-in-Time/pySDC | pySDC/playgrounds/deprecated/acoustic_1d_imex/playground.py | 1 | 3034 | import numpy as np
import pySDC.core.deprecated.PFASST_stepwise as mp
from ProblemClass import acoustic_1d_imex
from matplotlib import pyplot as plt
from pySDC.projects.FastWaveSlowWave.HookClass import plot_solution
from pySDC.core import CollocationClasses as collclass
from pySDC.core import Log
from pySDC.implementations.datatype_classes import mesh, rhs_imex_mesh
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
if __name__ == "__main__":
# set global logger (remove this if you do not want the output at all)
logger = Log.setup_custom_logger('root')
num_procs = 1
# This comes as read-in for the level class
lparams = {}
lparams['restol'] = 1E-10
sparams = {}
sparams['maxiter'] = 2
# setup parameters "in time"
t0 = 0.0
Tend = 3.0
dt = Tend/float(154)
# This comes as read-in for the problem class
pparams = {}
pparams['nvars'] = [(2,512)]
pparams['cadv'] = 0.1
pparams['cs'] = 1.0
pparams['order_adv'] = 5
# This comes as read-in for the transfer operations
tparams = {}
tparams['finter'] = True
# Fill description dictionary for easy hierarchy creation
description = {}
description['problem_class'] = acoustic_1d_imex
description['problem_params'] = pparams
description['dtype_u'] = mesh
description['dtype_f'] = rhs_imex_mesh
description['collocation_class'] = collclass.CollGaussLobatto
description['num_nodes'] = 2
description['sweeper_class'] = imex_1st_order
description['level_params'] = lparams
description['hook_class'] = plot_solution
#description['transfer_class'] = mesh_to_mesh
#description['transfer_params'] = tparams
# quickly generate block of steps
MS = mp.generate_steps(num_procs,sparams,description)
# get initial values on finest level
P = MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend,stats = mp.run_pfasst(MS,u0=uinit,t0=t0,dt=dt,Tend=Tend)
# compute exact solution and compare
uex = P.u_exact(Tend)
print('error at time %s: %s' %(Tend,np.linalg.norm(uex.values-uend.values,np.inf)/np.linalg.norm(uex.values,np.inf)))
fig = plt.figure(figsize=(8,8))
sigma_0 = 0.1
x_0 = 0.75
#plt.plot(P.mesh, uex.values[0,:], '+', color='b', label='u (exact)')
plt.plot(P.mesh, uend.values[1,:], '-', color='b', label='SDC')
#plt.plot(P.mesh, uex.values[1,:], '+', color='r', label='p (exact)')
#plt.plot(P.mesh, uend.values[1,:], '-', color='b', linewidth=2.0, label='p (SDC)')
p_slow = np.exp(-np.square(P.mesh-x_0)/(sigma_0*sigma_0))
#plt.plot(P.mesh, p_slow, '-', color='r', markersize=4, label='slow mode')
plt.legend(loc=2)
plt.xlim([0, 1])
plt.ylim([-0.1, 1.1])
fig.gca().grid()
#plt.show()
plt.gcf().savefig('fwsw-sdc-K'+str(sparams['maxiter'])+'-M'+str(description['num_nodes'])+'.pdf', bbox_inches='tight')
| bsd-2-clause |
uvchik/pvlib-python | pvlib/tmy.py | 1 | 28220 | """
Import functions for TMY2 and TMY3 data files.
"""
import re
import datetime
import dateutil
import io
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
import pandas as pd
def readtmy3(filename=None, coerce_year=None, recolumn=True):
'''
Read a TMY3 file in to a pandas dataframe.
Note that values contained in the metadata dictionary are unchanged
from the TMY3 file (i.e. units are retained). In the case of any
    discrepancies between this documentation and the TMY3 User's Manual
[1], the TMY3 User's Manual takes precedence.
The TMY3 files were updated in Jan. 2015. This function requires the
use of the updated files.
Parameters
----------
filename : None or string
If None, attempts to use a Tkinter file browser. A string can be
a relative file path, absolute file path, or url.
coerce_year : None or int
If supplied, the year of the data will be set to this value.
recolumn : bool
If True, apply standard names to TMY3 columns. Typically this
results in stripping the units from the column name.
Returns
-------
Tuple of the form (data, metadata).
data : DataFrame
A pandas dataframe with the columns described in the table
below. For more detailed descriptions of each component, please
consult the TMY3 User's Manual ([1]), especially tables 1-1
through 1-6.
metadata : dict
The site metadata available in the file.
Notes
-----
The returned structures have the following fields.
=============== ====== ===================
key format description
=============== ====== ===================
altitude Float site elevation
    latitude Float site latitude
    longitude Float site longitude
Name String site name
State String state
TZ Float UTC offset
USAF Int USAF identifier
=============== ====== ===================
============================= ======================================================================================================================================================
TMYData field description
============================= ======================================================================================================================================================
    TMYData.Index A pandas datetime index. NOTE, the index is currently timezone unaware, and times are set to local standard time (daylight saving time is not included)
TMYData.ETR Extraterrestrial horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.ETRN Extraterrestrial normal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.GHI Direct and diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.GHISource See [1], Table 1-4
TMYData.GHIUncertainty Uncertainty based on random and bias error estimates see [2]
    TMYData.DNI Amount of direct normal radiation (modeled) recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.DNISource See [1], Table 1-4
TMYData.DNIUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DHI Amount of diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
TMYData.DHISource See [1], Table 1-4
TMYData.DHIUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.GHillum Avg. total horizontal illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.GHillumSource See [1], Table 1-4
TMYData.GHillumUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DNillum Avg. direct normal illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.DNillumSource See [1], Table 1-4
TMYData.DNillumUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.DHillum Avg. horizontal diffuse illuminance recv'd during the 60 minutes prior to timestamp, lx
TMYData.DHillumSource See [1], Table 1-4
TMYData.DHillumUncertainty Uncertainty based on random and bias error estimates see [2]
TMYData.Zenithlum Avg. luminance at the sky's zenith during the 60 minutes prior to timestamp, cd/m^2
TMYData.ZenithlumSource See [1], Table 1-4
TMYData.ZenithlumUncertainty Uncertainty based on random and bias error estimates see [1] section 2.10
    TMYData.TotCld Amount of sky dome covered by clouds or obscuring phenomena at time stamp, tenths of sky
TMYData.TotCldSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.TotCldUnertainty See [1], Table 1-6
    TMYData.OpqCld Amount of sky dome covered by clouds or obscuring phenomena that prevent observing the sky at time stamp, tenths of sky
TMYData.OpqCldSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.OpqCldUncertainty See [1], Table 1-6
TMYData.DryBulb Dry bulb temperature at the time indicated, deg C
TMYData.DryBulbSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.DryBulbUncertainty See [1], Table 1-6
TMYData.DewPoint Dew-point temperature at the time indicated, deg C
TMYData.DewPointSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.DewPointUncertainty See [1], Table 1-6
    TMYData.RHum Relative humidity at the time indicated, percent
TMYData.RHumSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.RHumUncertainty See [1], Table 1-6
TMYData.Pressure Station pressure at the time indicated, 1 mbar
TMYData.PressureSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.PressureUncertainty See [1], Table 1-6
TMYData.Wdir Wind direction at time indicated, degrees from north (360 = north; 0 = undefined,calm)
TMYData.WdirSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.WdirUncertainty See [1], Table 1-6
TMYData.Wspd Wind speed at the time indicated, meter/second
TMYData.WspdSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.WspdUncertainty See [1], Table 1-6
TMYData.Hvis Distance to discernable remote objects at time indicated (7777=unlimited), meter
TMYData.HvisSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.HvisUncertainty See [1], Table 1-6
TMYData.CeilHgt Height of cloud base above local terrain (7777=unlimited), meter
TMYData.CeilHgtSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.CeilHgtUncertainty See [1], Table 1-6
TMYData.Pwat Total precipitable water contained in a column of unit cross section from earth to top of atmosphere, cm
TMYData.PwatSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.PwatUncertainty See [1], Table 1-6
TMYData.AOD The broadband aerosol optical depth per unit of air mass due to extinction by aerosol component of atmosphere, unitless
TMYData.AODSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.AODUncertainty See [1], Table 1-6
TMYData.Alb The ratio of reflected solar irradiance to global horizontal irradiance, unitless
TMYData.AlbSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.AlbUncertainty See [1], Table 1-6
TMYData.Lprecipdepth The amount of liquid precipitation observed at indicated time for the period indicated in the liquid precipitation quantity field, millimeter
    TMYData.Lprecipquantity The period of accumulation for the liquid precipitation depth field, hour
TMYData.LprecipSource See [1], Table 1-5, 8760x1 cell array of strings
TMYData.LprecipUncertainty See [1], Table 1-6
TMYData.PresWth Present weather code, see [2].
TMYData.PresWthSource Present weather code source, see [2].
TMYData.PresWthUncertainty Present weather code uncertainty, see [2].
============================= ======================================================================================================================================================
References
----------
[1] Wilcox, S and Marion, W. "Users Manual for TMY3 Data Sets".
NREL/TP-581-43156, Revised May 2008.
[2] Wilcox, S. (2007). National Solar Radiation Database 1991 2005
Update: Users Manual. 472 pp.; NREL Report No. TP-581-41364.
'''
if filename is None:
try:
filename = _interactive_load()
except:
raise Exception('Interactive load failed. Tkinter not supported ' +
'on this system. Try installing X-Quartz and ' +
'reloading')
head = ['USAF', 'Name', 'State', 'TZ', 'latitude', 'longitude', 'altitude']
try:
csvdata = open(filename, 'r')
except IOError:
response = urlopen(filename)
csvdata = io.StringIO(response.read().decode(errors='ignore'))
# read in file metadata
meta = dict(zip(head, csvdata.readline().rstrip('\n').split(",")))
# convert metadata strings to numeric types
meta['altitude'] = float(meta['altitude'])
meta['latitude'] = float(meta['latitude'])
meta['longitude'] = float(meta['longitude'])
meta['TZ'] = float(meta['TZ'])
meta['USAF'] = int(meta['USAF'])
data = pd.read_csv(
filename, header=1,
parse_dates={'datetime': ['Date (MM/DD/YYYY)', 'Time (HH:MM)']},
date_parser=lambda *x: _parsedate(*x, year=coerce_year),
index_col='datetime')
if recolumn:
_recolumn(data) # rename to standard column names
data = data.tz_localize(int(meta['TZ']*3600))
return data, meta
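# Usage sketch (illustrative; the file name below is a placeholder, not a
# bundled data file):
#
#   data, meta = readtmy3('723650TY.csv', coerce_year=2015)
#   meta['latitude'], meta['longitude'], meta['TZ']
#   data['GHI'].head()   # standard names because recolumn=True by default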
def _interactive_load():
import Tkinter
from tkFileDialog import askopenfilename
Tkinter.Tk().withdraw() # Start interactive file input
return askopenfilename()
def _parsedate(ymd, hour, year=None):
# stupidly complicated due to TMY3's usage of hour 24
# and dateutil's inability to handle that.
offset_hour = int(hour[:2]) - 1
offset_datetime = '{} {}:00'.format(ymd, offset_hour)
offset_date = dateutil.parser.parse(offset_datetime)
true_date = offset_date + dateutil.relativedelta.relativedelta(hours=1)
if year is not None:
true_date = true_date.replace(year=year)
return true_date
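# Worked example of the hour-24 handling above (values are illustrative):
#   _parsedate('12/31/2005', '24:00') -> datetime(2006, 1, 1, 0, 0)
# i.e. TMY3's "24th hour" rolls over to hour 0 of the following day; passing
# year=1990 would additionally pin the result to 1990-01-01 00:00.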
def _recolumn(tmy3_dataframe, inplace=True):
"""
Rename the columns of the TMY3 DataFrame.
Parameters
----------
tmy3_dataframe : DataFrame
inplace : bool
passed to DataFrame.rename()
Returns
-------
Recolumned DataFrame.
"""
raw_columns = 'ETR (W/m^2),ETRN (W/m^2),GHI (W/m^2),GHI source,GHI uncert (%),DNI (W/m^2),DNI source,DNI uncert (%),DHI (W/m^2),DHI source,DHI uncert (%),GH illum (lx),GH illum source,Global illum uncert (%),DN illum (lx),DN illum source,DN illum uncert (%),DH illum (lx),DH illum source,DH illum uncert (%),Zenith lum (cd/m^2),Zenith lum source,Zenith lum uncert (%),TotCld (tenths),TotCld source,TotCld uncert (code),OpqCld (tenths),OpqCld source,OpqCld uncert (code),Dry-bulb (C),Dry-bulb source,Dry-bulb uncert (code),Dew-point (C),Dew-point source,Dew-point uncert (code),RHum (%),RHum source,RHum uncert (code),Pressure (mbar),Pressure source,Pressure uncert (code),Wdir (degrees),Wdir source,Wdir uncert (code),Wspd (m/s),Wspd source,Wspd uncert (code),Hvis (m),Hvis source,Hvis uncert (code),CeilHgt (m),CeilHgt source,CeilHgt uncert (code),Pwat (cm),Pwat source,Pwat uncert (code),AOD (unitless),AOD source,AOD uncert (code),Alb (unitless),Alb source,Alb uncert (code),Lprecip depth (mm),Lprecip quantity (hr),Lprecip source,Lprecip uncert (code),PresWth (METAR code),PresWth source,PresWth uncert (code)'
new_columns = [
'ETR', 'ETRN', 'GHI', 'GHISource', 'GHIUncertainty',
'DNI', 'DNISource', 'DNIUncertainty', 'DHI', 'DHISource',
'DHIUncertainty', 'GHillum', 'GHillumSource', 'GHillumUncertainty',
'DNillum', 'DNillumSource', 'DNillumUncertainty', 'DHillum',
'DHillumSource', 'DHillumUncertainty', 'Zenithlum',
'ZenithlumSource', 'ZenithlumUncertainty', 'TotCld', 'TotCldSource',
'TotCldUnertainty', 'OpqCld', 'OpqCldSource', 'OpqCldUncertainty',
'DryBulb', 'DryBulbSource', 'DryBulbUncertainty', 'DewPoint',
'DewPointSource', 'DewPointUncertainty', 'RHum', 'RHumSource',
'RHumUncertainty', 'Pressure', 'PressureSource',
'PressureUncertainty', 'Wdir', 'WdirSource', 'WdirUncertainty',
'Wspd', 'WspdSource', 'WspdUncertainty', 'Hvis', 'HvisSource',
'HvisUncertainty', 'CeilHgt', 'CeilHgtSource', 'CeilHgtUncertainty',
'Pwat', 'PwatSource', 'PwatUncertainty', 'AOD', 'AODSource',
'AODUncertainty', 'Alb', 'AlbSource', 'AlbUncertainty',
'Lprecipdepth', 'Lprecipquantity', 'LprecipSource',
'LprecipUncertainty', 'PresWth', 'PresWthSource',
'PresWthUncertainty']
mapping = dict(zip(raw_columns.split(','), new_columns))
    return tmy3_dataframe.rename(columns=mapping, inplace=inplace)
def readtmy2(filename):
'''
Read a TMY2 file in to a DataFrame.
Note that values contained in the DataFrame are unchanged from the
TMY2 file (i.e. units are retained). Time/Date and location data
imported from the TMY2 file have been modified to a "friendlier"
    form conforming to modern conventions (e.g. N latitude is positive, E
longitude is positive, the "24th" hour of any day is technically the
"0th" hour of the next day). In the case of any discrepencies
between this documentation and the TMY2 User's Manual [1], the TMY2
User's Manual takes precedence.
Parameters
----------
filename : None or string
If None, attempts to use a Tkinter file browser. A string can be
a relative file path, absolute file path, or url.
Returns
-------
Tuple of the form (data, metadata).
data : DataFrame
A dataframe with the columns described in the table below. For a
more detailed descriptions of each component, please consult the
TMY2 User's Manual ([1]), especially tables 3-1 through 3-6, and
Appendix B.
metadata : dict
The site metadata available in the file.
Notes
-----
The returned structures have the following fields.
============= ==================================
key description
============= ==================================
WBAN Site identifier code (WBAN number)
City Station name
State Station state 2 letter designator
TZ Hours from Greenwich
latitude Latitude in decimal degrees
longitude Longitude in decimal degrees
altitude Site elevation in meters
============= ==================================
============================ ==========================================================================================================================================================================
TMYData field description
============================ ==========================================================================================================================================================================
index Pandas timeseries object containing timestamps
year
month
day
hour
ETR Extraterrestrial horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
ETRN Extraterrestrial normal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
GHI Direct and diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
GHISource See [1], Table 3-3
GHIUncertainty See [1], Table 3-4
DNI Amount of direct normal radiation (modeled) recv'd during 60 mintues prior to timestamp, Wh/m^2
DNISource See [1], Table 3-3
DNIUncertainty See [1], Table 3-4
DHI Amount of diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
DHISource See [1], Table 3-3
DHIUncertainty See [1], Table 3-4
GHillum Avg. total horizontal illuminance recv'd during the 60 minutes prior to timestamp, units of 100 lux (e.g. value of 50 = 5000 lux)
GHillumSource See [1], Table 3-3
GHillumUncertainty See [1], Table 3-4
DNillum Avg. direct normal illuminance recv'd during the 60 minutes prior to timestamp, units of 100 lux
DNillumSource See [1], Table 3-3
DNillumUncertainty See [1], Table 3-4
DHillum Avg. horizontal diffuse illuminance recv'd during the 60 minutes prior to timestamp, units of 100 lux
DHillumSource See [1], Table 3-3
DHillumUncertainty See [1], Table 3-4
Zenithlum Avg. luminance at the sky's zenith during the 60 minutes prior to timestamp, units of 10 Cd/m^2 (e.g. value of 700 = 7,000 Cd/m^2)
ZenithlumSource See [1], Table 3-3
ZenithlumUncertainty See [1], Table 3-4
    TotCld Amount of sky dome covered by clouds or obscuring phenomena at time stamp, tenths of sky
TotCldSource See [1], Table 3-5, 8760x1 cell array of strings
TotCldUnertainty See [1], Table 3-6
    OpqCld Amount of sky dome covered by clouds or obscuring phenomena that prevent observing the sky at time stamp, tenths of sky
OpqCldSource See [1], Table 3-5, 8760x1 cell array of strings
OpqCldUncertainty See [1], Table 3-6
DryBulb Dry bulb temperature at the time indicated, in tenths of degree C (e.g. 352 = 35.2 C).
DryBulbSource See [1], Table 3-5, 8760x1 cell array of strings
DryBulbUncertainty See [1], Table 3-6
DewPoint Dew-point temperature at the time indicated, in tenths of degree C (e.g. 76 = 7.6 C).
DewPointSource See [1], Table 3-5, 8760x1 cell array of strings
DewPointUncertainty See [1], Table 3-6
RHum Relative humidity at the time indicated, percent
RHumSource See [1], Table 3-5, 8760x1 cell array of strings
RHumUncertainty See [1], Table 3-6
Pressure Station pressure at the time indicated, 1 mbar
PressureSource See [1], Table 3-5, 8760x1 cell array of strings
PressureUncertainty See [1], Table 3-6
Wdir Wind direction at time indicated, degrees from east of north (360 = 0 = north; 90 = East; 0 = undefined,calm)
WdirSource See [1], Table 3-5, 8760x1 cell array of strings
WdirUncertainty See [1], Table 3-6
Wspd Wind speed at the time indicated, in tenths of meters/second (e.g. 212 = 21.2 m/s)
WspdSource See [1], Table 3-5, 8760x1 cell array of strings
WspdUncertainty See [1], Table 3-6
Hvis Distance to discernable remote objects at time indicated (7777=unlimited, 9999=missing data), in tenths of kilometers (e.g. 341 = 34.1 km).
HvisSource See [1], Table 3-5, 8760x1 cell array of strings
HvisUncertainty See [1], Table 3-6
CeilHgt Height of cloud base above local terrain (7777=unlimited, 88888=cirroform, 99999=missing data), in meters
CeilHgtSource See [1], Table 3-5, 8760x1 cell array of strings
CeilHgtUncertainty See [1], Table 3-6
Pwat Total precipitable water contained in a column of unit cross section from Earth to top of atmosphere, in millimeters
PwatSource See [1], Table 3-5, 8760x1 cell array of strings
PwatUncertainty See [1], Table 3-6
AOD The broadband aerosol optical depth (broadband turbidity) in thousandths on the day indicated (e.g. 114 = 0.114)
AODSource See [1], Table 3-5, 8760x1 cell array of strings
AODUncertainty See [1], Table 3-6
SnowDepth Snow depth in centimeters on the day indicated, (999 = missing data).
SnowDepthSource See [1], Table 3-5, 8760x1 cell array of strings
SnowDepthUncertainty See [1], Table 3-6
LastSnowfall Number of days since last snowfall (maximum value of 88, where 88 = 88 or greater days; 99 = missing data)
LastSnowfallSource See [1], Table 3-5, 8760x1 cell array of strings
LastSnowfallUncertainty See [1], Table 3-6
PresentWeather See [1], Appendix B, an 8760x1 cell array of strings. Each string contains 10 numeric values. The string can be parsed to determine each of 10 observed weather metrics.
============================ ==========================================================================================================================================================================
References
----------
    [1] Marion, W. and Urban, K. "User's Manual for TMY2s". NREL 1995.
'''
if filename is None:
try:
filename = _interactive_load()
except:
raise Exception('Interactive load failed. Tkinter not supported on this system. Try installing X-Quartz and reloading')
string = '%2d%2d%2d%2d%4d%4d%4d%1s%1d%4d%1s%1d%4d%1s%1d%4d%1s%1d%4d%1s%1d%4d%1s%1d%4d%1s%1d%2d%1s%1d%2d%1s%1d%4d%1s%1d%4d%1s%1d%3d%1s%1d%4d%1s%1d%3d%1s%1d%3d%1s%1d%4d%1s%1d%5d%1s%1d%10d%3d%1s%1d%3d%1s%1d%3d%1s%1d%2d%1s%1d'
columns = 'year,month,day,hour,ETR,ETRN,GHI,GHISource,GHIUncertainty,DNI,DNISource,DNIUncertainty,DHI,DHISource,DHIUncertainty,GHillum,GHillumSource,GHillumUncertainty,DNillum,DNillumSource,DNillumUncertainty,DHillum,DHillumSource,DHillumUncertainty,Zenithlum,ZenithlumSource,ZenithlumUncertainty,TotCld,TotCldSource,TotCldUnertainty,OpqCld,OpqCldSource,OpqCldUncertainty,DryBulb,DryBulbSource,DryBulbUncertainty,DewPoint,DewPointSource,DewPointUncertainty,RHum,RHumSource,RHumUncertainty,Pressure,PressureSource,PressureUncertainty,Wdir,WdirSource,WdirUncertainty,Wspd,WspdSource,WspdUncertainty,Hvis,HvisSource,HvisUncertainty,CeilHgt,CeilHgtSource,CeilHgtUncertainty,PresentWeather,Pwat,PwatSource,PwatUncertainty,AOD,AODSource,AODUncertainty,SnowDepth,SnowDepthSource,SnowDepthUncertainty,LastSnowfall,LastSnowfallSource,LastSnowfallUncertaint'
hdr_columns = 'WBAN,City,State,TZ,latitude,longitude,altitude'
TMY2, TMY2_meta = _read_tmy2(string, columns, hdr_columns, filename)
return TMY2, TMY2_meta
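# Usage sketch (illustrative; the file name is a placeholder):
#
#   data, meta = readtmy2('station.tm2')
#   meta['latitude'], meta['longitude']
#   data['DryBulb'].head()   # values are unchanged, i.e. tenths of deg C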
def _parsemeta_tmy2(columns, line):
"""Retrieves metadata from the top line of the tmy2 file.
Parameters
----------
columns : string
String of column headings in the header
line : string
Header string containing DataFrame
Returns
-------
meta : Dict of metadata contained in the header string
"""
# Remove duplicated spaces, and read in each element
rawmeta = " ".join(line.split()).split(" ")
meta = rawmeta[:3] # take the first string entries
meta.append(int(rawmeta[3]))
    # Convert latitude to decimal notation; southern latitudes are negative
    latitude = (
        float(rawmeta[5]) + float(rawmeta[6])/60) * (2*(rawmeta[4] == 'N') - 1)
    # Convert longitude to decimal notation; western longitudes are negative
    longitude = (
        float(rawmeta[8]) + float(rawmeta[9])/60) * (2*(rawmeta[7] == 'E') - 1)
    meta.append(latitude)
    meta.append(longitude)
    meta.append(float(rawmeta[10]))
# Creates a dictionary of metadata
meta_dict = dict(zip(columns.split(','), meta))
return meta_dict
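# Example of the header parsing above (the header text is a fabricated sample
# laid out like a TMY2 first line, not taken from a shipped file):
#
#   line = ' 23183 PHOENIX AZ -7 N 33 26 W 112 1 339'
#   _parsemeta_tmy2('WBAN,City,State,TZ,latitude,longitude,altitude', line)
#   # -> {'WBAN': '23183', 'City': 'PHOENIX', 'State': 'AZ', 'TZ': -7,
#   #     'latitude': 33.43..., 'longitude': -112.01..., 'altitude': 339.0}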
def _read_tmy2(string, columns, hdr_columns, fname):
head = 1
date = []
with open(fname) as infile:
fline = 0
for line in infile:
# Skip the header
if head != 0:
meta = _parsemeta_tmy2(hdr_columns, line)
head -= 1
continue
# Reset the cursor and array for each line
cursor = 1
part = []
for marker in string.split('%'):
# Skip the first line of markers
if marker == '':
continue
# Read the next increment from the marker list
                increment = int(re.findall(r'\d+', marker)[0])
# Extract the value from the line in the file
val = (line[cursor:cursor+increment])
# increment the cursor by the length of the read value
cursor = cursor+increment
# Determine the datatype from the marker string
if marker[-1] == 'd':
try:
val = float(val)
except:
raise Exception('WARNING: In' + fname +
' Read value is not an integer " ' +
val + ' " ')
elif marker[-1] == 's':
try:
val = str(val)
except:
raise Exception('WARNING: In' + fname +
' Read value is not a string" ' +
val + ' " ')
else:
raise Exception('WARNING: In' + __name__ +
'Improper column DataFrame " %' +
marker + ' " ')
part.append(val)
if fline == 0:
axes = [part]
year = part[0]+1900
fline = 1
else:
axes.append(part)
# Create datetime objects from read data
date.append(datetime.datetime(year=int(year),
month=int(part[1]),
day=int(part[2]),
hour=int(part[3])-1))
data = pd.DataFrame(
axes, index=date,
columns=columns.split(',')).tz_localize(int(meta['TZ']*3600))
return data, meta
| bsd-3-clause |
abvanpelt/tickmate | analysis/tmkit/linear_regression.py | 5 | 2249 | import sqlite3
from sklearn import linear_model
import numpy as np
import pandas as pd
import datetime
import sys
conn = sqlite3.connect(sys.argv[1])
c = conn.cursor();
c.execute("select _id, name from tracks")
rows = c.fetchall()
track_names = pd.DataFrame([{'track_name': row[1]} for row in rows])
track_ids = [int(row[0]) for row in rows]
track_cnt = len(track_ids)
print "Found {0} tracks.".format(track_cnt)
c.execute("select * from ticks")
last_tick = c.fetchall()[-1]
last_day = datetime.date(last_tick[2], last_tick[3], last_tick[4])
def window(day, n=20):
"return a matrix of the last `n` days before day `day`"
tick_date = "date(year || '-' || substr('0' || month, -2, 2) || " + \
"'-' || substr('0' || day, -2, 2))"
max_date = "date('{d.year:04d}-{d.month:02d}-{d.day:02d}')".\
format(d=day)
min_date = "date('{d.year:04d}-{d.month:02d}-{d.day:02d}')".\
format(d=day-datetime.timedelta(n))
c.execute("select * from ticks where {d} <= {max_date} and {d} >= {min_date}".\
format(d=tick_date, max_date=max_date, min_date=min_date))
# ticktrix is the matrix containing the ticks
ticktrix = np.zeros((n, track_cnt))
for row in c.fetchall():
print row
try:
row_date = datetime.date(row[2], row[3], row[4])
        except ValueError:
            print "Error constructing date from", row
            continue
x = -(row_date - day).days
y = track_ids.index(int(row[1]))
if x < n:
ticktrix[x, y] = 1
return ticktrix
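# Shape sketch (illustrative; the date below is arbitrary): row 0 of the
# returned matrix holds `day` itself and row k holds the ticks from k days
# earlier, with one 0/1 column per track.
#
#   m = window(datetime.date(2014, 3, 1))
#   assert m.shape == (20, track_cnt)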
last_day -= datetime.timedelta(1)
print "Fitting for day:", last_day
my_window = window(last_day)
target_data = my_window[0,:].T
training_data = my_window[1:,:].T
print "Target:", target_data.shape
print target_data
print "Training:", training_data.shape
print training_data
reg = linear_model.LinearRegression()
reg.fit(training_data, target_data)
print "Coefficents:", reg.coef_.shape
print reg.coef_
print "Applied to training data:"
print np.dot(training_data, reg.coef_)
print "Forecast"
#print np.dot(my_window[:19,:].T, reg.coef_)
#print track_names
df = pd.DataFrame()
df['track'] = track_names
df['prob'] = pd.Series(np.dot(my_window[:19,:].T, reg.coef_) * 100.0)
print df
| gpl-3.0 |
treycausey/scikit-learn | sklearn/utils/fixes.py | 1 | 2946 | """Compatibility fixes for older versions of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import numpy as np
np_version = []
for x in np.__version__.split('.'):
try:
np_version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
np_version.append(x)
np_version = tuple(np_version)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.copy(x)
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out
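# Quick sanity check of the tanh identity used in the fallback above
# (illustrative): 1 / (1 + exp(-2)) == 0.5 * (1 + tanh(1)) ~= 0.8808, so
# expit(np.array([-2., 0., 2.])) is approximately [0.1192, 0.5, 0.8808].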
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
| bsd-3-clause |
alanch-ms/PTVS | Python/Product/Analyzer/BuiltinScraperTests.py | 3 | 18557 | # Python Tools for Visual Studio
# Copyright(c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
__author__ = "Microsoft Corporation <[email protected]>"
__version__ = "3.0.0.0"
import re
import unittest
from pprint import pformat
from BuiltinScraper import parse_doc_str, BUILTIN, __builtins__, get_overloads_from_doc_string, TOKENS_REGEX
try:
unicode
except NameError:
from BuiltinScraper import unicode
import sys
class Test_BuiltinScraperTests(unittest.TestCase):
def check_doc_str(self, doc, module_name, func_name, expected, mod=None, extra_args=[], obj_class=None):
r = parse_doc_str(doc, module_name, mod, func_name, extra_args, obj_class)
# Quick pass if everything matches
if r == expected:
return
        msg = 'Expected:\n%s\nActual:\n%s' % (pformat(expected), pformat(r))
self.assertEqual(len(r), len(expected), msg)
def check_dict(e, a, indent):
if e == a:
return
missing_keys = set(e.keys()) - set(a.keys())
extra_keys = set(a.keys()) - set(e.keys())
mismatched_keys = [k for k in set(a.keys()) & set(e.keys()) if a[k] != e[k]]
if missing_keys:
                print('%sDid not receive the following keys: %s' % (indent, ', '.join(missing_keys)))
if extra_keys:
                print('%sDid not expect the following keys: %s' % (indent, ', '.join(extra_keys)))
for k in mismatched_keys:
if isinstance(e[k], dict) and isinstance(a[k], dict):
check_dict(e[k], a[k], indent + ' ')
elif (isinstance(e[k], tuple) and isinstance(a[k], tuple) or isinstance(e[k], list) and isinstance(a[k], list)):
check_seq(e[k], a[k], indent + ' ')
else:
print('%sExpected "%s": "%s"' % (indent, k, e[k]))
print('%sActual "%s": "%s"' % (indent, k, a[k]))
print('')
def check_seq(e, a, indent):
if e == a:
return
for i, (e2, a2) in enumerate(zip(e, a)):
if isinstance(e2, dict) and isinstance(a2, dict):
check_dict(e2, a2, indent + ' ')
elif (isinstance(e2, tuple) and isinstance(a2, tuple) or isinstance(e2, list) and isinstance(a2, list)):
check_seq(e2, a2, indent + ' ')
                elif e2 != a2:
print('%sExpected "%s"' % (indent, e2))
print('%sActual "%s"' % (indent, a2))
print('')
for e1, a1 in zip(expected, r):
check_dict(e1, a1, '')
self.fail(msg)
def test_regex(self):
self.assertSequenceEqual(
[i.strip() for i in re.split(TOKENS_REGEX, 'f(\'\', \'a\', \'a\\\'b\', "", "a", "a\\\"b")') if i.strip()],
['f', '(', "''", ',', "'a'", ',', "'a\\'b'", ',', '""', ',', '"a"', ',', '"a\\"b"', ')']
)
self.assertSequenceEqual(
[i.strip() for i in re.split(TOKENS_REGEX, 'f(1, 1., -1, -1.)') if i.strip()],
['f', '(', '1', ',', '1.', ',', '-1', ',', '-1.', ')']
)
self.assertSequenceEqual(
[i.strip() for i in re.split(TOKENS_REGEX, 'f(a, *a, **a, ...)') if i.strip()],
['f', '(', 'a', ',', '*', 'a', ',', '**', 'a', ',', '...', ')']
)
self.assertSequenceEqual(
[i.strip() for i in re.split(TOKENS_REGEX, 'f(a:123, a=123) --> => ->') if i.strip()],
['f', '(', 'a', ':', '123', ',', 'a', '=', '123', ')', '-->', '=>', '->']
)
def test_numpy_1(self):
self.check_doc_str(
"""arange([start,] stop[, step,], dtype=None)
Returns
-------
out : ndarray""",
'numpy',
'arange',
[{
'doc': 'Returns\n -------\n out : ndarray',
'ret_type': [('', 'ndarray')],
'args': (
{'name': 'start', 'default_value':'None'},
{'name': 'stop'},
{'name': 'step', 'default_value': 'None'},
{'name': 'dtype', 'default_value':'None'},
)
}]
)
def test_numpy_2(self):
self.check_doc_str(
"""arange([start,] stop[, step,], dtype=None)
Return - out : ndarray""",
'numpy',
'arange',
[{
'doc': 'Return - out : ndarray',
'ret_type': [('', 'ndarray')],
'args': (
{'name': 'start', 'default_value':'None'},
{'name': 'stop'},
{'name': 'step', 'default_value': 'None'},
{'name': 'dtype', 'default_value':'None'},
)
}]
)
def test_reduce(self):
self.check_doc_str(
'reduce(function, sequence[, initial]) -> value',
BUILTIN,
'reduce',
mod=__builtins__,
expected = [{
'args': (
{'name': 'function'},
{'name': 'sequence'},
{'default_value': 'None', 'name': 'initial'}
),
'doc': '',
'ret_type': [('', 'value')]
}]
)
def test_pygame_draw_arc(self):
self.check_doc_str(
'pygame.draw.arc(Surface, color, Rect, start_angle, stop_angle, width=1): return Rect',
'draw',
'arc',
[{
'args': (
{'name': 'Surface'},
{'name': 'color'},
{'name': 'Rect'},
{'name': 'start_angle'},
{'name': 'stop_angle'},
{'default_value': '1', 'name': 'width'}
),
'doc': '',
'ret_type': [('', 'Rect')]
}]
)
def test_isdigit(self):
self.check_doc_str(
'''B.isdigit() -> bool
Return True if all characters in B are digits
and there is at least one character in B, False otherwise.''',
'bytes',
'isdigit',
[{
'args': (),
'doc': 'Return True if all characters in B are digits\nand there is at least one character in B, False otherwise.',
'ret_type': [(BUILTIN, 'bool')]
}]
)
def test_init(self):
self.check_doc_str(
'x.__init__(...) initializes x; see help(type(x)) for signature',
'str',
'__init__',
[{'args': ({'arg_format': '*', 'name': 'args'},), 'doc': 'initializes x; see help(type(x)) for signature'}]
)
def test_find(self):
self.check_doc_str(
'S.find(sub [,start [,end]]) -> int',
'str',
'find',
[{
'args': (
{'name': 'sub'},
{'default_value': 'None', 'name': 'start'},
{'default_value': 'None', 'name': 'end'}
),
'doc': '',
'ret_type': [(BUILTIN, 'int')]
}]
)
def test_format(self):
self.check_doc_str(
'S.format(*args, **kwargs) -> unicode',
'str',
'format',
[{
'args': (
{'arg_format': '*', 'name': 'args'},
{'arg_format': '**', 'name': 'kwargs'}
),
'doc': '',
'ret_type': [(BUILTIN, unicode.__name__)]
}]
)
def test_ascii(self):
self.check_doc_str(
"'ascii(object) -> string\n\nReturn the same as repr(). In Python 3.x, the repr() result will\\ncontain printable characters unescaped, while the ascii() result\\nwill have such characters backslash-escaped.'",
'future_builtins',
'ascii',
[{
'args': ({'name': 'object'},),
'doc': "Return the same as repr(). In Python 3.x, the repr() result will\\ncontain printable characters unescaped, while the ascii() result\\nwill have such characters backslash-escaped.'",
'ret_type': [(BUILTIN, 'str')]
}]
)
def test_preannotation(self):
self.check_doc_str(
'f(INT class_code) => SpaceID',
'fob',
'f',
[{
'args': ({'name': 'class_code', 'type': [(BUILTIN, 'int')]},),
'doc': '',
'ret_type': [('', 'SpaceID')]
}])
def test_compress(self):
self.check_doc_str(
'compress(data, selectors) --> iterator over selected data\n\nReturn data elements',
'itertools',
'compress',
[{
'args': ({'name': 'data'}, {'name': 'selectors'}),
'doc': 'Return data elements',
'ret_type': [('', 'iterator')]
}]
)
def test_isinstance(self):
self.check_doc_str(
'isinstance(object, class-or-type-or-tuple) -> bool\n\nReturn whether an object is an '
'instance of a class or of a subclass thereof.\nWith a type as second argument, '
'return whether that is the object\'s type.\nThe form using a tuple, isinstance(x, (A, B, ...)),'
' is a shortcut for\nisinstance(x, A) or isinstance(x, B) or ... (etc.).',
BUILTIN,
'isinstance',
[{
'args': ({'name': 'object'}, {'name': 'class-or-type-or-tuple'}),
'doc': "Return whether an object is an instance of a class or of a subclass thereof.\n"
"With a type as second argument, return whether that is the object's type.\n"
"The form using a tuple, isinstance(x, (A, B, ...)), is a shortcut for\n"
"isinstance(x, A) or isinstance(x, B) or ... (etc.).",
'ret_type': [(BUILTIN, 'bool')]
}]
)
def test_tuple_parameters(self):
self.check_doc_str(
'pygame.Rect(left, top, width, height): return Rect\n'
'pygame.Rect((left, top), (width, height)): return Rect\n'
'pygame.Rect(object): return Rect\n'
'pygame object for storing rectangular coordinates',
'pygame',
'Rect',
[{
'args': ({'name': 'left'}, {'name': 'top'}, {'name': 'width'}, {'name': 'height'}),
'doc': 'pygame object for storing rectangular coordinates',
'ret_type': [('', 'Rect')]
},
{
'args': ({'name': 'left, top'}, {'name': 'width, height'}),
'doc': 'pygame object for storing rectangular coordinates',
'ret_type': [('', 'Rect')]
},
{
'args': ({'name': 'object'},),
'doc': 'pygame object for storing rectangular coordinates',
'ret_type': [('', 'Rect')]
}]
)
def test_read(self):
self.check_doc_str(
'read([size]) -> read at most size bytes, returned as a string.\n\n'
'If the size argument is negative or omitted, read until EOF is reached.\n'
'Notice that when in non-blocking mode, less data than what was requested\n'
'may be returned, even if no size parameter was given.',
BUILTIN,
'read',
mod=__builtins__,
expected=[{
'args': ({'default_value': 'None', 'name': 'size'},),
'doc': 'read at most size bytes, returned as a string.\n\nIf the size argument is negative or omitted, read until EOF is reached.\nNotice that when in non-blocking mode, less data than what was requested\nmay be returned, even if no size parameter was given.',
'ret_type': [('', '')]
}]
)
r = get_overloads_from_doc_string(
'read([size]) -> read at most size bytes, returned as a string.\n\n'
'If the size argument is negative or omitted, read until EOF is reached.\n'
'Notice that when in non-blocking mode, less data than what was requested\n'
'may be returned, even if no size parameter was given.',
__builtins__,
None,
'read'
)
self.assertEqual(
r,
[{
'args': ({'default_value': 'None', 'name': 'size'},),
'doc': 'read at most size bytes, returned as a string.\n\nIf the size argument is negative or omitted, read until EOF is reached.\nNotice that when in non-blocking mode, less data than what was requested\nmay be returned, even if no size parameter was given.',
'ret_type': [('', '')]
}],
repr(r)
)
def test_new(self):
self.check_doc_str(
'T.__new__(S, ...) -> a new object with type S, a subtype of T',
'struct',
'__new__',
[{
'ret_type': [('', '')],
'doc': 'a new object with type S, a subtype of T',
'args': ({'name': 'S'}, {'arg_format': '*', 'name': 'args'})
}]
)
def test_C_prototype(self):
self.check_doc_str(
'GetDriverByName(char const * name) -> Driver',
'',
'GetDriverByName',
[{
'ret_type': [('', 'Driver')],
'doc': '',
'args': ({'name': 'name', 'type': [(BUILTIN, 'str')]},),
}]
)
def test_chmod(self):
self.check_doc_str(
'chmod(path, mode, *, dir_fd=None, follow_symlinks=True)',
'nt',
'chmod',
[{
'doc': '',
'args': (
{'name': 'path'},
{'name': 'mode'},
{'name': 'args', 'arg_format': '*'},
{'name': 'dir_fd', 'default_value': 'None'},
{'name': 'follow_symlinks', 'default_value': 'True'}
)
}]
)
def test_open(self):
if sys.version_info[0] >= 3:
expect_ret_type = ('_io', '_IOBase')
else:
expect_ret_type = (BUILTIN, 'file')
self.check_doc_str(
'open(file, mode=\'r\', buffering=-1, encoding=None,\n' +
' errors=None, newline=None, closefd=True, opener=None)' +
' -> file object\n\nOpen file',
BUILTIN,
'open',
[{
'doc': 'Open file',
'ret_type': [expect_ret_type],
'args': (
{'name': 'file'},
{'name': 'mode', 'default_value': "'r'"},
{'name': 'buffering', 'default_value': '-1'},
{'name': 'encoding', 'default_value': 'None'},
{'name': 'errors', 'default_value': 'None'},
{'name': 'newline', 'default_value': 'None'},
{'name': 'closefd', 'default_value': 'True'},
{'name': 'opener', 'default_value': 'None'},
)
}]
)
def test_optional_with_default(self):
self.check_doc_str(
'max(iterable[, key=func]) -> value',
BUILTIN,
'max',
[{
'doc': '',
'ret_type': [('', 'value')],
'args': (
{'name': 'iterable'},
{'name': 'key', 'default_value': 'func'}
)
}]
)
def test_pyplot_figure(self):
pyplot_doc = """
Creates a new figure.
Parameters
----------
num : integer or string, optional, default: none
If not provided, a new figure will be created, and a the figure number
will be increamted. The figure objects holds this number in a `number`
attribute.
If num is provided, and a figure with this id already exists, make
it active, and returns a reference to it. If this figure does not
exists, create it and returns it.
If num is a string, the window title will be set to this figure's
`num`.
figsize : tuple of integers, optional, default : None
width, height in inches. If not provided, defaults to rc
figure.figsize.
dpi : integer, optional, default ; None
resolution of the figure. If not provided, defaults to rc figure.dpi.
facecolor :
the background color; If not provided, defaults to rc figure.facecolor
edgecolor :
the border color. If not provided, defaults to rc figure.edgecolor
Returns
-------
figure : Figure
The Figure instance returned will also be passed to new_figure_manager
in the backends, which allows to hook custom Figure classes into the
pylab interface. Additional kwargs will be passed to the figure init
function.
Note
----
If you are creating many figures, make sure you explicitly call "close"
on the figures you are not using, because this will enable pylab
to properly clean up the memory.
rcParams defines the default values, which can be modified in the
matplotlibrc file
"""
self.check_doc_str(
pyplot_doc,
'matplotlib.pyplot',
'figure',
[{
'doc': pyplot_doc,
'ret_type': [('', 'Figure')],
'args': (
{'name': 'args', 'arg_format': '*'},
{'name': 'kwargs', 'arg_format': '**'}
)
}]
)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
spallavolu/scikit-learn | sklearn/grid_search.py | 61 | 37197 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin, ChangedBehaviorWarning
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
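# Illustrative sketch, not part of scikit-learn: __getitem__ above decodes a flat index
# into one grid point with mixed-radix arithmetic over the per-parameter sizes, so that
# ParameterGrid(g)[i] == list(ParameterGrid(g))[i] without materialising the whole list:
#   >>> grid = ParameterGrid({'a': [1, 2], 'b': [True, False]})
#   >>> grid[3] == {'a': 2, 'b': False} == list(grid)[3]
#   True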
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
This estimator will be cloned and then fitted.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
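# Minimal usage sketch for fit_grid_point (illustrative only; the dataset, estimator and
# index split below are assumptions, not part of this module):
#   >>> import numpy as np
#   >>> from sklearn.datasets import load_iris
#   >>> from sklearn.svm import SVC
#   >>> from sklearn.metrics.scorer import check_scoring
#   >>> iris = load_iris()
#   >>> train, test = np.arange(0, 150, 2), np.arange(1, 150, 2)
#   >>> scorer = check_scoring(SVC(), scoring='accuracy')
#   >>> score, params, n_test = fit_grid_point(iris.data, iris.target, SVC(), {'C': 1.0},
#   ...                                        train, test, scorer, verbose=0)
#   >>> params, n_test
#   ({'C': 1.0}, 75)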
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
# Out is a list of triplet: score, estimator, n_test_samples
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
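# Note on the score aggregation in _fit above (explanatory comment, not original code):
# with iid=True each fold score is weighted by its number of test samples,
#   score = sum(score_k * n_test_k) / sum(n_test_k)
# whereas with iid=False every fold contributes equally,
#   score = sum(score_k) / n_folds
# The best candidate is the one with the highest mean validation score, ties being
# broken deterministically by the sort.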
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type is instantiated for each grid point.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default 1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type is instantiated for each parameter setting.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
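# Minimal usage sketch (illustrative only; the estimator and distributions below are
# assumptions, not part of this module):
#   >>> from scipy.stats import expon
#   >>> from sklearn.datasets import load_iris
#   >>> from sklearn.svm import SVC
#   >>> iris = load_iris()
#   >>> search = RandomizedSearchCV(SVC(), {'C': expon(scale=10),
#   ...                                     'kernel': ['linear', 'rbf']},
#   ...                             n_iter=5, random_state=0)
#   >>> search = search.fit(iris.data, iris.target)
#   >>> sorted(search.best_params_.keys())
#   ['C', 'kernel']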
| bsd-3-clause |
datoszs/czech-lawyers | externals/nss_crawler.py | 1 | 29783 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# coding=utf-8
"""
Crawler of the Supreme Administrative Court of the Czech Republic.
Downloads HTML and PDF files and produces a CSV file with the results.
"""
__author__ = "Radim Jílek"
__copyright__ = "Copyright 2016, DATOS - data o spravedlnosti, z.s."
__license__ = "GNU GPL"
import codecs
import csv
import json
import logging
import math
import os
import re
import shutil
import subprocess
import sys
from collections import OrderedDict
from datetime import datetime
from optparse import OptionParser
from os.path import join
from urllib.parse import urljoin
import pandas as pd
from bs4 import BeautifulSoup
from ghost import Ghost
from tqdm import tqdm
base_url = "http://nssoud.cz/"
url = "http://nssoud.cz/main0Col.aspx?cls=JudikaturaBasicSearch&pageSource=0"
hash_id = datetime.now().strftime("%d-%m-%Y")
working_dir = "working"
screens_dir = "screens_" + hash_id
documents_dir = "documents"
txt_dir = "txt"
html_dir = "html"
log_dir = "log_nss"
logger, writer_records, ghost, session, list_of_links = (None,) * 5
main_timeout = 100000
global_ncols = 90
saved_pages = 0
saved_records = 0
# set view progress bar and view browser window
#progress, view = (False, False)
b_screens = False # capture screenshots?
# precompile regex
p_re_records = re.compile(r'(\d+)$')
p_re_decisions = re.compile(r'[a-z<>]{4}\s+(.+)\s+')
#
# service functions
#
def set_logging():
"""
configure logging to the console and to per-run debug and info log files
"""
global logger
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
fh_d = logging.FileHandler(join(log_dir, __file__[0:-3] + "_" + hash_id + "_log_debug.txt"),
mode="w", encoding='utf-8')
fh_d.setLevel(logging.DEBUG)
fh_i = logging.FileHandler(join(log_dir, __file__[0:-3] + "_" + hash_id + "_log.txt"),
mode="w", encoding='utf-8')
fh_i.setLevel(logging.INFO)
# create console handler
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter(u'%(asctime)s - %(funcName)-20s - %(levelname)-5s: %(message)s',
datefmt='%H:%M:%S')
ch.setFormatter(formatter)
fh_d.setFormatter(formatter)
fh_i.setFormatter(formatter)
# add the handlers to logger
logger.addHandler(ch)
logger.addHandler(fh_d)
logger.addHandler(fh_i)
def create_directories():
"""
create working directories
"""
for directory in [out_dir, documents_dir_path, html_dir_path, result_dir_path]:
os.makedirs(directory, exist_ok=True)
logger.info("Folder was created '" + directory + "'")
if b_screens:
screens_dir_path = join(join(out_dir, ".."), screens_dir)
if os.path.exists(screens_dir_path):
logger.debug("Erasing old screens")
shutil.rmtree(screens_dir_path)
os.mkdir(screens_dir_path)
logger.info("Folder was created '{}'".format(screens_dir_path))
return screens_dir_path
def clean_directory(root):
"""
clear directory (after successfully run)
:param root: path to directory
"""
for f in os.listdir(root):
try:
shutil.rmtree(join(root, f))
except NotADirectoryError:
os.remove(join(root, f))
def logging_process(arguments):
"""
run a subprocess and log its stdout/stderr output
"""
p = subprocess.Popen(arguments, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if stdout:
logger.debug("%s" % stdout)
if stderr:
logger.debug("%s" % stderr)
def parameters():
"""
:return: dict with all options
"""
usage = "usage: %prog [options]"
parser = OptionParser(usage)
parser.add_option("-w", "--without-download", action="store_true", dest="download", default=False,
help="Not download PDF documents")
parser.add_option("-n", "--not-delete", action="store_true", dest="delete", default=False,
help="Not delete working directory")
parser.add_option("-d", "--output-directory", action="store", type="string", dest="dir", default="output_dir",
help="Path to output directory")
parser.add_option("-l", "--last-days", action="store", type="string", dest="last", default=None,
help="Increments for the last N days (N in <1,60>)")
parser.add_option("-f", "--date-from", action="store", type="string", dest="date_from", default="1. 1. 2006",
help="Start date of range (d. m. yyyy)")
parser.add_option("-t", "--date-to", action="store", type="string", dest="date_to", default=None,
help="End date of range (d. m. yyyy)")
parser.add_option("-c", "--capture", action="store_true", dest="screens", default=False,
help="Capture screenshots?")
parser.add_option("-o", "--output-file", action="store", type="string", dest="filename", default="metadata.csv",
help="Name of output CSV file")
parser.add_option("-e", "--extraction", action="store_true", dest="extraction", default=False,
help="Make only extraction without download new data")
parser.add_option("--progress-bar", action="store_true", dest="progress", default=False,
help="Show progress bar during operations")
parser.add_option("--view", action="store_true", dest="view", default=False,
help="View window during operations")
parser.add_option("-s", "--select-only", action="store", type="string", dest="selection", default=None,
help="Download only cases from selection list (file or input string with '|' as delimiter")
(options, args) = parser.parse_args()
options = vars(options)
return options
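# Example invocation (illustrative; the dates and directory are placeholders):
#   python nss_crawler.py -d output_dir -f "1. 1. 2016" -t "31. 12. 2016" --progress-bar
# downloads decisions from the given date range into ./output_dir with progress bars.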
#
# help functions
#
def make_json(collection):
"""
make correct json format
:param collection:
:return: JSON
"""
return json.dumps(dict(zip(range(1, len(collection) + 1), collection)), sort_keys=True,
ensure_ascii=False)
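# Illustrative example (assumed input): make_json(["zalobce A", "zalovany B"]) returns
# the JSON string '{"1": "zalobce A", "2": "zalovany B"}' (keys are 1-based positions).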
def make_soup(path):
"""
make BeautifulSoup object Soup from input HTML file
:param path: path to HTMl file
:return: BeautifulSoup object Soup
"""
soup = BeautifulSoup(codecs.open(path, encoding="utf-8"), "html.parser")
return soup
def how_many(str_info, displayed_records):
"""
Find number of records and compute count of pages.
:type str_info: string
:param str_info: info element as string
:type displayed_records: int
:param displayed_records: number of displayed records
"""
m = p_re_records.search(str_info)
number_of_records = m.group(1)
count_of_pages = math.ceil(int(number_of_records) / int(displayed_records))
logger.info("records: %s => pages: %s", number_of_records, count_of_pages)
return number_of_records, count_of_pages
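# Illustrative example (assumed values): for str_info ending in "125" and 20 displayed
# records per page, how_many(...) returns ("125", 7), since ceil(125 / 20) == 7.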
def first_page():
"""
go back to the first page of the search results
"""
session.click(
"#_ctl0_ContentPlaceMasterPage__ctl0_pnPaging1_Repeater2__ctl0_Linkbutton2", expect_loading=True)
def extract_data(response, html_file):
"""
save current page as HTML file for later extraction
:type response: string
:param response: HTML code for saving
:type html_file: string
:param html_file: name of saving file
"""
logger.debug("Save file '%s'" % html_file)
with codecs.open(join(html_dir_path, html_file), "w", encoding="utf-8") as f:
f.write(response)
def load_data(csv_file):
"""
load data from csv file
:param csv_file: name of CSV file
"""
data = pd.read_csv(csv_file, sep=";", na_values=['', "nan"])
return data
def download_pdf(data):
"""
download PDF files
:type data: DataFrame
:param data: Pandas object
"""
frame = data[["web_path", "local_path"]].dropna()
t = frame.itertuples()
if progress:
t = tqdm(t, ncols=global_ncols) # view progress bar
for row in t:
filename = row[2]
if not os.path.exists(join(documents_dir_path, filename)):
logging_process(
["curl", row[1], "-o", join(documents_dir_path, filename)])
if progress:
t.update() # update progress bar
def prepare_list(selection):
list_of_marks = []
if selection[-4] == '.':
data = load_data(selection)
frame = data["registry_mark"].dropna()
elif "|" in selection:
frame = selection.split('|')
else:
raise AttributeError("Input data are in wrong format!")
for row in frame:
tails = row.split('-')
mark = tails[0].strip()
number = tails[-1].strip()
list_of_marks.append((mark, number if number != mark else None))
return list_of_marks
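# Illustrative example (assumed input format):
#   prepare_list("1 As 12/2016 - 34|5 Azs 1/2015")
# returns [("1 As 12/2016", "34"), ("5 Azs 1/2015", None)]; the part after the dash is
# the order number, or None when the entry contains no order number.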
def get_filename(registry_mark, order_number, extension):
return "{}-{}.{}".format(registry_mark.replace('/', '-'), order_number, extension).replace(' ', '_')
#
# process functions
#
def make_record(soup):
"""
extract relevant data from page
:param soup: bs4 soup object
"""
table = soup.find("table", id="_ctl0_ContentPlaceMasterPage__ctl0_grwA")
rows = table.findAll("tr")
logger.debug("Records on pages: %d" % len(rows[1:]))
count_records_with_document = 0
for record in rows[1:]:
columns = record.findAll("td") # columns of table in the row
case_number = columns[1].getText().replace("\n", '').strip()
# extract decision results
decisions_str = str(columns[2]).replace("\n", '').strip()
m = p_re_decisions.search(decisions_str)
line = m.group(1)
decision_result = [x.replace('\"', '\'').strip()
for x in re.split("</?br/?>", line)]
decisions = ""
if len(decision_result) > 1:
decisions = "|".join(decision_result[1:])
form_decision = decision_result[0]
else:
form_decision = decision_result[0]
decision_result = decisions
link_elem = columns[1].select_one('a[href*=SOUDNI_VYKON]')
# link to the decision's document
if link_elem is not None:
link = link_elem['href']
link = urljoin(base_url, link)
count_records_with_document += 1
else:
link = None
#continue # case without document
# registry mark isn't case number
mark = case_number.split("-")[0].strip()
court = columns[3].getText().replace("\n", '').strip()
str_date = columns[4].getText().replace("\n", '').strip()
# extract sides to list
sides = []
for side in columns[5].contents:
if side.string is not None:
text = side.string.strip().replace('"', "'")
if text != '':
sides.append(text)
else:
sides.extend([x.strip().replace('"', "'")
for x in re.split(r"</?br/?>", str(side)) if x != ''])
complaint = columns[6].getText().strip()
prejudicate = [x.text.strip() for x in columns[7].findAll("a")]
prejudicate = [x for x in prejudicate if x != '']
date = [x.strip() for x in str_date.split("/ ")]
if len(date) >= 1:
date = date[0]
# convert date from format dd.mm.YYYY to YYYY-mm-dd
date = datetime.strptime(date, '%d.%m.%Y').strftime('%Y-%m-%d')
case_year = mark.split('/')[-1]
filename = "" if link is None else os.path.basename(link)
logger.debug(
"Contents: {}\nSides: {}; Complaint: {}; Year: {}; Prejudicate: {}\n{}".format(columns[5].contents,
sides,
complaint, case_year,
prejudicate, link))
item = {
"registry_mark": mark,
"record_id" : case_number,
"decision_date": date,
"court_name" : court,
"web_path" : link,
"local_path" : filename,
"decision_type": form_decision,
"decision" : decision_result,
"order_number" : case_number,
"sides" : make_json(sides) if len(sides) else None,
"prejudicate" : make_json(prejudicate) if len(prejudicate) else None,
"complaint" : complaint if len(complaint) else None,
"case_year" : case_year
}
if selection and link is None:
continue
writer_records.writerow(item) # write item to CSV
logger.debug(case_number)
logger.debug("Find %s records with document on this page" %
count_records_with_document)
return count_records_with_document
def extract_information(saved_pages, extract=None):
"""
extract information from the saved HTML files and write it to the output CSV
:param saved_pages: number of all saved pages
:type extract: bool
:param extract: flag which indicates type of extraction
"""
html_files = [join(html_dir_path, fn)
for fn in next(os.walk(html_dir_path))[2]]
if len(html_files) == saved_pages or extract:
global writer_records
fieldnames = ['court_name', 'record_id', 'registry_mark', 'decision_date',
'web_path', 'local_path', 'decision_type', 'decision',
'order_number', 'sides', 'complaint', 'prejudicate', 'case_year']
csv_records = open(join(out_dir, output_file), 'w',
newline='', encoding="utf-8")
writer_records = csv.DictWriter(
csv_records, fieldnames=fieldnames, delimiter=";", quoting=csv.QUOTE_ALL)
writer_records.writeheader()
t = html_files
count_documents = 0
if progress:
t = tqdm(t, ncols=global_ncols)
for html_f in t:
logger.debug(html_f)
count_documents += make_record(make_soup(html_f))
logger.info("%s records had a document" % count_documents)
csv_records.close()
else:
logger.warning("Count of 'saved_pages'({}) and saved files({}) is differrent!".format(
saved_pages, len(html_files)))
def view_data(row_count, mark_type, value, date_from=None, date_to=None, last=None):
"""
set the search form parameters for viewing data
:param row_count: how many records should be shown per page
:param last: how many days ago (increment mode)
:type mark_type: text
:param mark_type: text identifier of the mark type
:param value: numeric identifier of the mark type used in the search form
:param date_from: start date of range
:param date_to: end date of range
"""
if last and session.exists("#_ctl0_ContentPlaceMasterPage__ctl0_chkPrirustky"):
logger.debug("Select check button")
session.set_field_value(
"#_ctl0_ContentPlaceMasterPage__ctl0_chkPrirustky", True)
if session.exists("#_ctl0_ContentPlaceMasterPage__ctl0_ddlPosledniDny"):
logger.info("Select last %s days" % last)
session.set_field_value(
"#_ctl0_ContentPlaceMasterPage__ctl0_ddlPosledniDny", last)
else:
if date_from is not None:
# setting range search
logger.info("Records from the period %s -> %s", date_from, date_to)
# id (input - text) = _ctl0_ContentPlaceMasterPage__ctl0_txtDatumOd
if session.exists("#_ctl0_ContentPlaceMasterPage__ctl0_txtDatumOd"):
session.set_field_value(
"#_ctl0_ContentPlaceMasterPage__ctl0_txtDatumOd", date_from)
if date_to is not None:
# id (input - text) = _ctl0_ContentPlaceMasterPage__ctl0_txtDatumDo
if session.exists("#_ctl0_ContentPlaceMasterPage__ctl0_txtDatumDo"):
session.set_field_value(
"#_ctl0_ContentPlaceMasterPage__ctl0_txtDatumDo", date_to)
# shows several first records
# change mark type in select
if session.exists("#_ctl0_ContentPlaceMasterPage__ctl0_ddlRejstrik"):
logger.debug("Change mark type - %s", mark_type)
session.set_field_value(
"#_ctl0_ContentPlaceMasterPage__ctl0_ddlRejstrik", value)
# time.sleep(1)
if session.exists("#_ctl0_ContentPlaceMasterPage__ctl0_ddlSortName"):
session.set_field_value(
"#_ctl0_ContentPlaceMasterPage__ctl0_ddlSortName", "2")
session.set_field_value(
"#_ctl0_ContentPlaceMasterPage__ctl0_ddlSortDirection", "0")
if session.exists("#_ctl0_ContentPlaceMasterPage__ctl0_rbTypDatum_1"):
session.set_field_value(
"#_ctl0_ContentPlaceMasterPage__ctl0_rbTypDatum_1", True)
if session.exists("#_ctl0_ContentPlaceMasterPage__ctl0_btnFind"): # click on find button
logger.debug("Click - find")
session.click(
"#_ctl0_ContentPlaceMasterPage__ctl0_btnFind", expect_loading=True)
if b_screens:
logger.debug("\t_find_screen_" + mark_type + ".png")
session.capture_to(
join(screens_dir_path, "_find_screen_" + mark_type + ".png"))
# change value of row count on page
if session.exists("#_ctl0_ContentPlaceMasterPage__ctl0_ddlRowCount"):
value, resources = session.evaluate(
"document.getElementById('_ctl0_ContentPlaceMasterPage__ctl0_ddlRowCount').value")
#print("value != '30'",value != "30")
if value != "30":
logger.debug("Change row count")
session.set_field_value(
"#_ctl0_ContentPlaceMasterPage__ctl0_ddlRowCount", str(row_count))
if session.exists("#_ctl0_ContentPlaceMasterPage__ctl0_btnChangeCount"):
logger.debug("Click - Change")
result, resources = session.click("#_ctl0_ContentPlaceMasterPage__ctl0_btnChangeCount",
expect_loading=True)
if b_screens:
logger.debug("\tfind_screen_" + mark_type +
"_change_row_count.png")
session.capture_to(
join(screens_dir_path, "/_find_screen_" + mark_type + "_change_row_count.png"))
def view_data_by_order_number(registry_mark, order_number):
if registry_mark is not None:
# setting registry mark field
logger.debug("Type registry mark: " + registry_mark)
# id (input - text) = _ctl0_ContentPlaceMasterPage__ctl0_txtDatumOd
if session.exists("#_ctl0_ContentPlaceMasterPage__ctl0_txtSpisovaZnackaFull"):
session.set_field_value("#_ctl0_ContentPlaceMasterPage__ctl0_txtSpisovaZnackaFull", registry_mark)
if order_number is not None:
# setting registry mark field
logger.debug("Type order number: " + order_number)
# id (input - text) = _ctl0_ContentPlaceMasterPage__ctl0_txtDatumOd
if session.exists("#_ctl0_ContentPlaceMasterPage__ctl0_txtCisloJednaci"):
session.set_field_value("#_ctl0_ContentPlaceMasterPage__ctl0_txtCisloJednaci", order_number)
if session.exists("#_ctl0_ContentPlaceMasterPage__ctl0_btnFind"): # click on find button
logger.debug("Click - find")
session.click("#_ctl0_ContentPlaceMasterPage__ctl0_btnFind", expect_loading=True)
if b_screens:
filename = "_find_screen_{}".format(get_filename(registry_mark, order_number, extension="png"))
logger.debug("\t" + filename)
session.capture_to(join(screens_dir_path, filename))
def walk_pages(count_of_pages, case_type):
"""
walk through the result pages and save each page as an HTML file
:param count_of_pages: how many pages to traverse
:param case_type: name of the case type, used for file names and easier problem identification
"""
last_file = str(count_of_pages) + "_" + case_type + ".html"
if os.path.exists(join(html_dir_path, last_file)):
logger.debug("Skip %s type <-- '%s' exists" % (case_type, last_file))
return True
logger.debug("count_of_pages: %d", count_of_pages)
positions = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
t = range(1, count_of_pages + 1)
if progress:
t = tqdm(t, ncols=global_ncols) # progress progress bar
for i in t: # walk pages
response = session.content
#soup = BeautifulSoup(response,"html.parser")
html_file = str(i) + "_" + case_type + ".html"
if not os.path.exists(join(html_dir_path, html_file)):
if session.exists("#_ctl0_ContentPlaceMasterPage__ctl0_grwA"):
extract_data(response, html_file)
pass
else:
logger.debug("Skip file '%s'" % html_file)
# TO DO - danger
if i >= 12 and count_of_pages > 22:
logger.debug("(%d) - %d < 10 --> %s <== (count_of_pages ) - (i) < 10 = Boolean", count_of_pages, i,
count_of_pages - i < 10)
# special compute for last pages
if count_of_pages - (i + 1) < 10:
logger.debug(
"positions[(i-(count_of_pages))] = %d", positions[(i - count_of_pages)])
page_number = str(positions[(i - count_of_pages)] + 12)
else:
page_number = "12" # next page element has constant ID
else:
page_number = str(i + 1) # few first pages
logger.debug("Number = %s", page_number)
if b_screens:
session.capture_to(join(screens_dir_path, "find_screen_" + case_type + "_0" + str(i) + ".png"), None,
selector="#pagingBox0")
if session.exists(
"#_ctl0_ContentPlaceMasterPage__ctl0_pnPaging1_Repeater2__ctl" + page_number + "_LinkButton1") and i + 1 < (
count_of_pages + 1):
#link_id = "_ctl0_ContentPlaceMasterPage__ctl0_pnPaging1_Repeater2__ctl"+page_number+"_LinkButton1"
link = "_ctl0:ContentPlaceMasterPage:_ctl0:pnPaging1:Repeater2:_ctl" + \
page_number + ":LinkButton1"
logger.debug("\tGo to next - Page %d (%s)", (i + 1), link)
try:
# result, resources = session.click("#"+link_id,
# expect_loading=True)
session.evaluate(
"WebForm_DoPostBackWithOptions(new WebForm_PostBackOptions(\"%s\", \"\", true, \"\", \"\", false, true))" % link,
expect_loading=True)
#session.wait_for(page_has_loaded,"Timeout - next page",timeout=main_timeout)
logger.debug("New page was loaded!")
except Exception:
logger.error(
"Error (walk_pages) - close browser", exc_info=True)
logger.debug("error_(" + str(i + 1) + ").png")
session.capture_to(
join(screens_dir_path, "error_(" + str(i + 1) + ").png"))
return False
return True
def process_court():
"""
    Start point for processing: creates the files for processing and saving data for each case type.
"""
d = {"Ads": '10', "Afs": '11', "Ars": '116', "As": '12',
"Azs": '9', "Aos": '115', "Ans": '13', "Aps": '14'}
#d = {"As" : '12'}
case_types = OrderedDict(sorted(d.items(), key=lambda t: t[0]))
row_count = 20
global saved_pages
global saved_records
for case_type in case_types.keys():
logger.info("-----------------------------------------------------")
logger.info(case_type)
view_data(row_count, case_type, case_types[
case_type], date_from=date_from, date_to=date_to, last=last)
number_of_records, resources = session.evaluate(
"document.getElementById('_ctl0_ContentPlaceMasterPage__ctl0_ddlRowCount').value")
        # number_of_records = "30"  # hack for testing
if number_of_records is not None and int(number_of_records) != row_count:
            logger.warning("Row count mismatch: %s != %s", number_of_records, row_count)
logger.error("Failed to display data")
if b_screens:
logger.debug("error_" + case_type + ".png")
session.capture_to(
join(screens_dir_path, "error_" + case_type + ".png"))
return False
# my_result = session.exists("#_ctl0_ContentPlaceMasterPage__ctl0_pnPaging1_Repeater3__ctl0_Label2")
#print (my_result)
if not session.exists("#_ctl0_ContentPlaceMasterPage__ctl0_pnPaging1_Repeater3__ctl0_Label2"):
logger.info("No records")
continue
info_elem, resources = session.evaluate(
"document.getElementById('_ctl0_ContentPlaceMasterPage__ctl0_pnPaging1_Repeater3__ctl0_Label2').innerHTML")
if info_elem:
            # number_of_records = "20"  # hack for testing
str_info = info_elem.replace("<b>", "").replace("</b>", "")
number_of_records, count_of_pages = how_many(
str_info, number_of_records)
else:
return False
# testing
# if count_of_pages >=5:
# count_of_pages = 5
result = walk_pages(count_of_pages, case_type)
saved_pages += count_of_pages
saved_records += int(number_of_records)
if not result:
logger.warning("Result of 'walk_pages' is False")
return False
first_page()
return True
def process_selection():
registry_marks = prepare_list(selection)
global saved_pages
global saved_records
for registry_mark, order_number in registry_marks:
html_file_name = get_filename(registry_mark, order_number, extension="html")
view_data_by_order_number(registry_mark, order_number)
if not os.path.exists(join(html_dir_path, html_file_name)):
extract_data(session.content, html_file_name)
saved_pages += 1
saved_records = saved_pages
return True
def main():
"""
    Main entry point: opens the court site, downloads and extracts records,
    and optionally downloads the original documents.
    :return: True on success, False otherwise
"""
global ghost
ghost = Ghost()
global session
session = ghost.start(
download_images=False, show_scrollbars=False,
wait_timeout=main_timeout, display=False,
plugins_enabled=False)
logger.info(u"Start - NSS")
if view:
session.display = True
session.show()
session.open(url)
if b_screens:
logger.debug("_screen.png")
session.capture_to(join(screens_dir_path, "_screen.png"))
logger.debug("=" * 20)
logger.info("Download records")
if selection:
result = process_selection()
else:
result = process_court()
# print(result)
if result:
logger.info("DONE - download records")
logger.debug("Closing browser")
logger.info("It was saved {} records on {} pages".format(
saved_records, saved_pages))
# input(":-)")
logger.debug("=" * 20)
logger.info("Extract informations")
extract_information(saved_pages)
logger.info("DONE - extraction")
logger.debug("=" * 20)
if not b_download: # debug without download
logger.info("Download original files")
data = load_data(join(out_dir, output_file))
download_pdf(data)
logger.info("DONE - Download files")
else:
logger.error("Error (main)- closing browser, exiting")
return False
return True
if __name__ == "__main__":
options = parameters()
out_dir = options["dir"]
b_download = options["download"]
date_from = options["date_from"]
date_to = options["date_to"]
b_screens = options["screens"]
b_delete = options["delete"]
last = options["last"]
output_file = options["filename"]
progress = options["progress"]
view = options["view"]
selection = options["selection"]
if ".csv" not in output_file:
output_file += ".csv"
if not os.path.exists(out_dir):
os.mkdir(out_dir)
print("Folder was created '" + out_dir + "'")
set_logging()
logger.info(hash_id)
logger.debug(options)
result_dir_path = join(out_dir, "result")
out_dir = join(out_dir, working_dir) # new outdir is working directory
documents_dir_path = join(out_dir, documents_dir)
html_dir_path = join(out_dir, html_dir)
# result_dir_path = os.path.normpath(join(out_dir,join(os.pardir,"result")))
screens_dir_path = create_directories()
if options["extraction"]:
logger.info("Only extract informations")
extract_information(saved_pages, extract=True)
logger.info("DONE - extraction")
logger.info("Moving files")
shutil.copy(join(out_dir, output_file), result_dir_path)
else:
if main():
# move results of crawling
if not os.listdir(result_dir_path):
logger.info("Moving files")
shutil.move(documents_dir_path, result_dir_path)
shutil.move(join(out_dir, output_file), result_dir_path)
if not b_delete: # debug without cleaning
logger.info("Cleaning working directory")
clean_directory(out_dir)
else:
logger.error("Result directory isn't empty.")
sys.exit(-1)
sys.exit(0)
else:
sys.exit(-1)
| gpl-3.0 |
zpostone/zpostone.github.io | drafts/attempt.py | 1 | 2191 | from bs4 import BeautifulSoup
import requests
import requests.exceptions
import urllib
import urllib.parse
from collections import deque
import re
import sys
import pandas as pd
df_urls = pd.DataFrame()
df_urls = pd.read_table('urls_7.csv', sep=',')
df_test = df_urls.head(n=10)
for index, row in df_test.iterrows():
url = (row['Links'])
#new_urls = deque([inputurl])
# a set of urls that we have already crawled
processed_urls = set()
# a set of crawled emails
emails = set()
# process urls one by one until we exhaust the queue
# extract base url to resolve relative links
parts = urllib.parse.urlsplit(url)
base_url = "{0.scheme}://{0.netloc}".format(parts)
path = url[:url.rfind('/')+1] if '/' in parts.path else url
# path is full path... http://...
#get url's content
#print("Processing %s" % url)
try:
response = requests.get(url)
except (requests.exceptions.MissingSchema, requests.exceptions.ConnectionError):
# ignore pages with errors
continue
# extract all email addresses and add them into the resulting set
new_emails = set(re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+", response.text, re.I))
emails.update(new_emails)
# create a beautiful soup for the html document
soup = BeautifulSoup(response.text, 'html.parser')
# find and process all the anchors in the document
for anchor in soup.find_all("a"):
# extract link url from the anchor
link = anchor.attrs["href"] if "href" in anchor.attrs else ''
# resolve relative links
if link.startswith('mailto'):
            emails.add(link)  # add the whole mailto link, not its individual characters
break
elif link.startswith('/'):
link = base_url + link
elif not link.startswith(url):
break
# add the new url to the queue if it was not enqueued nor processed yet
#if not link in new_urls and not link in processed_urls:
#new_urls.append(link)
new_emailsStr = str(new_emails)
df_test.set_value(index, 'Email', new_emailsStr)
#df_test.set_value(index, 'MAILTOS', mailtos)
#print new_urls
df_test.to_csv('EXPORT.csv')
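# Illustrative sketch (not part of the original script): the commented-out
# `new_urls` deque and the "queue" comments above hint at a breadth-first
# crawl that follows same-site links instead of scraping one page per row.
# A minimal version could look like this; `max_pages` is an assumed safety
# limit, not something the script defines.
def crawl_site_for_emails(start_url, max_pages=50):
    new_urls = deque([start_url])
    seen, found = set(), set()
    while new_urls and len(seen) < max_pages:
        page = new_urls.popleft()
        if page in seen:
            continue
        seen.add(page)
        try:
            resp = requests.get(page)
        except (requests.exceptions.MissingSchema, requests.exceptions.ConnectionError):
            continue
        # harvest addresses from the raw page text
        found.update(re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+", resp.text, re.I))
        parts = urllib.parse.urlsplit(page)
        base = "{0.scheme}://{0.netloc}".format(parts)
        # enqueue same-site links only
        for anchor in BeautifulSoup(resp.text, 'html.parser').find_all("a"):
            href = anchor.attrs.get("href", "")
            if href.startswith("/"):
                href = base + href
            if href.startswith(base) and href not in seen:
                new_urls.append(href)
    return found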
#df_test | mit |
untom/scikit-learn | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the amount of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
RomainBrault/scikit-learn | examples/svm/plot_separating_hyperplane_unbalanced.py | 25 | 1866 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatic correction for
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
    similar to that of an SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier
# we create two clusters of random points: 1000 and 100 samples
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]
# get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired, edgecolors='k')
plt.legend()
plt.axis('tight')
plt.show()
| bsd-3-clause |
Moonlock/DiceTest | GUI/main.py | 1 | 17004 | from PIL import Image, ImageTk
from datetime import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import json
import os
import smtplib
import socket
from cycler import cycler
from matplotlib import style
from matplotlib import ticker
import matplotlib
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import numpy
import Tkinter as tk
import config
from dice import DiceSet
style.use('seaborn-whitegrid')
matplotlib.use("TkAgg")
SUBTITLE_FONT = ("Helvetica", 30)
HEADER_FONT = ("Helvetica", 20, "bold")
TEXT_FONT = ("Helvetica", 20)
INPUT_FONT = ("Helvetica", 12)
class Main(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
self.controller = controller
self.filename = None
self.players = self.controller.players
self.dice = DiceSet()
self.redDie = tk.IntVar()
self.yellowDie = tk.IntVar()
self.graphType = tk.StringVar(value="Combined")
self.graphDisplay = tk.StringVar(value="Number")
self.resultsType = tk.StringVar(value="All Players")
def toDict(self):
return {"dice": self.dice.toDict(), "players": [player.toDict() for player in self.controller.players]}
def startNewGame(self):
self.curPlayer = self.players[0]
self.graphType = tk.StringVar(value="Combined")
self.graphDisplay = tk.StringVar(value="Number")
self.resultsType = tk.StringVar(value="All Players")
self.createTitleFrame()
leftFrame = tk.Frame(self)
leftFrame.pack(side="left", expand=True, fill="both", pady=(self.controller.HEIGHT/16, 0))
self.createInputFrame(leftFrame)
colourList = [player.colour for player in self.players]
matplotlib.rcParams.update({'axes.prop_cycle': cycler('color', colourList)})
graphOptions = ["Combined", "Separate", "Cycle Players", "----------"]
graphOptions.extend([p.name for p in self.controller.players])
rightFrame = tk.Frame(self)
rightFrame.pack(side="left", expand=True, fill="both")
self.createGraphFrame(rightFrame, graphOptions, 3, self.updateNewGraph)
def startLoadGame(self):
self.groups = self.controller.groups
self.graphType = tk.StringVar(value="All")
self.graphDisplay = tk.StringVar(value="Percent")
self.resultsType = tk.StringVar(value=self.groups.keys()[0])
self.createTitleFrame(False)
resultsFrame = tk.Frame(self)
resultsFrame.pack(side="left")
resultsOptions = []
resultsOptions.extend([groupName for groupName in self.groups])
resultsMenu = tk.OptionMenu(resultsFrame, self.resultsType, *resultsOptions, command=self.changeLoadResults)
resultsMenu.grid(row=0, column=0, columnspan=3)
resultsMenu.config(font=TEXT_FONT, width=12, anchor="w")
resultsMenu['menu'].config(font=TEXT_FONT)
self.results = tk.Canvas(resultsFrame, relief="ridge", bd=2, width=self.controller.WIDTH*2/5, height=self.controller.HEIGHT/2)
self.results.grid(row=1, column=0, columnspan=3, pady=(0, 20), padx=50)
self.changeLoadResults(self.resultsType.get())
graphOptions = ["All", "----------"]
graphOptions.extend([groupName for groupName in self.groups])
rightFrame = tk.Frame(self)
rightFrame.pack(side="left", expand=True, fill="both")
self.createGraphFrame(rightFrame, graphOptions, 1, self.updateLoadGraph)
def createTitleFrame(self, newGame=True):
titleFrame = tk.Frame(self)
titleFrame.pack(side="top", fill="x", anchor="n")
if newGame:
self.rollNum = tk.Label(titleFrame, text="ROLL 0", font=SUBTITLE_FONT)
self.rollNum.pack(side="top")
self.turnName = tk.Label(titleFrame, text=self.curPlayer.name, font=SUBTITLE_FONT)
self.turnName.pack(side="top", pady=(0, self.controller.HEIGHT/32))
else:
tk.Label(titleFrame, text="Compare Dice", font=SUBTITLE_FONT, height=2).pack(side="top")
tk.Button(titleFrame, text="Menu", font=TEXT_FONT, width=10, relief="groove", bd=2, command=lambda: self.controller.showFrame("MainMenu")
).place(x=20, y=20)
def createInputFrame(self, leftFrame):
inputFrame = tk.Frame(leftFrame)
inputFrame.pack()
self.createDiceFrames(inputFrame)
buttonFrame = tk.Frame(inputFrame)
buttonFrame.pack(side="top", pady=(0, self.controller.HEIGHT/8))
tk.Button(buttonFrame, text="Submit", font=TEXT_FONT, width=10, relief="groove", bd=2, command=self.submit
).grid(row=0, column=0, columnspan=2, pady=(0, 40))
tk.Button(buttonFrame, text="Skip Turn", font=TEXT_FONT, width=10, relief="groove", bd=2, command=self.endTurn
).grid(row=1, column=0, padx=20)
tk.Button(buttonFrame, text="End", font=TEXT_FONT, width=10, relief="groove", bd=2, command=lambda: self.end(inputFrame, leftFrame)
).grid(row=1, column=1, padx=20)
self.createAveragesTable(inputFrame)
def createDiceFrames(self, inputFrame):
redFrame = tk.Frame(inputFrame)
redFrame.pack(side="top", pady=(0, 20))
redFrame.images = []
yellowFrame = tk.Frame(inputFrame)
yellowFrame.pack(side="top", pady=(0, 20))
yellowFrame.images = []
for i in xrange(1, 7):
redDie = Image.open("images/red" + str(i) + ".png")
redFrame.images.append(ImageTk.PhotoImage(redDie))
tk.Radiobutton(redFrame, image=redFrame.images[i-1], variable=self.redDie, value=i, indicatoron=False, font=TEXT_FONT, height=44, width=44, offrelief="groove", bd=2
).pack(side="left", padx=(20, 0))
yellowDie = Image.open("images/yellow" + str(i) + ".png")
yellowFrame.images.append(ImageTk.PhotoImage(yellowDie))
tk.Radiobutton(yellowFrame, image=yellowFrame.images[i-1], variable=self.yellowDie, value=i, indicatoron=False, font=TEXT_FONT, height=44, width=44, offrelief="groove", bd=2
).pack(side="left", padx=(20, 0))
def createAveragesTable(self, inputFrame):
tableFrame = tk.Frame(inputFrame, bd=2, relief="sunken")
tableFrame.pack(side="top")
titleFrame = tk.Frame(tableFrame)
titleFrame.pack(side="top")
tk.Label(titleFrame, text="", font=TEXT_FONT, bd=1, relief="ridge", width=15).grid(row=0, column=0)
tk.Label(titleFrame, text="Red Die", font=HEADER_FONT, bd=1, relief="ridge", width=15).grid(row=0, column=1)
tk.Label(titleFrame, text="Yellow Die", font=HEADER_FONT, bd=1, relief="ridge", width=15).grid(row=0, column=2)
tk.Label(titleFrame, text="Combined Dice", font=HEADER_FONT, bd=1, relief="ridge", width=15).grid(row=0, column=3)
self.allPlayersTableRow = self.createRow("All Players", tableFrame)
for player in self.controller.players:
player.tableRow = self.createRow(player.name, tableFrame)
def createRow(self, text, tableFrame):
row = tk.Frame(tableFrame)
row.pack(side="top")
tk.Label(row, text=text, font=HEADER_FONT, bd=1, relief="ridge", width=15).grid(row=1, column=0)
row.red = tk.Label(row, text="0", font=TEXT_FONT, bd=1, relief="ridge", width=15)
row.yellow = tk.Label(row, text="0", font=TEXT_FONT, bd=1, relief="ridge", width=15)
row.combined = tk.Label(row, text="0", font=TEXT_FONT, bd=1, relief="ridge", width=15)
row.red.grid(row=1, column=1)
row.yellow.grid(row=1, column=2)
row.combined.grid(row=1, column=3)
return row
def createGraphFrame(self, rightFrame, graphOptions, dividerIndex, graphFunction):
graphFrame = tk.Frame(rightFrame)
graphFrame.pack(side="top")
graphMenu = tk.OptionMenu(graphFrame, self.graphType, *graphOptions, command=graphFunction)
graphMenu.grid(row=0, column=0, columnspan=2)
graphMenu.config(font=TEXT_FONT, width=12, anchor="w")
graphMenu['menu'].config(font=TEXT_FONT)
graphMenu['menu'].entryconfigure(dividerIndex, state="disabled")
self.createGraph(graphFrame)
graphFunction()
tk.Radiobutton(graphFrame, text="%", variable=self.graphDisplay, value="Percent", indicatoron=False, font=TEXT_FONT, width=10, offrelief="groove", bd=2, command=graphFunction
).grid(row=2, column=0)
tk.Radiobutton(graphFrame, text="#", variable=self.graphDisplay, value="Number", indicatoron=False, font=TEXT_FONT, width=10, offrelief="groove", bd=2, command=graphFunction
).grid(row=2, column=1)
def createGraph(self, frame):
graphFrame = tk.Frame(frame, bd=3, relief="ridge")
graphFrame.grid(row=1, column=0, columnspan=2, pady=10)
matplotlib.rcParams.update({'font.size': 20})
matplotlib.rcParams.update({'axes.grid.axis': 'y'})
f = Figure(figsize=(9,7))
self.combinedGraph = f.add_subplot(2,1,1)
self.redGraph = f.add_subplot(2,2,3)
self.yellowGraph = f.add_subplot(2,2,4)
self.canvas = FigureCanvasTkAgg(f, graphFrame)
self.canvas.show()
self.canvas.get_tk_widget().pack()
def submit(self):
red, yellow = self.redDie.get(), self.yellowDie.get()
if red and yellow:
self.dice.addRoll(red, yellow)
self.curPlayer.dice.addRoll(red, yellow)
self.redDie.set(0)
self.yellowDie.set(0)
self.updateAverages(self.allPlayersTableRow, self.dice)
self.updateAverages(self.curPlayer.tableRow, self.curPlayer.dice)
self.endTurn()
self.updateNewGraph()
def updateAverages(self, row, diceSet):
redAvg, yellowAvg, combined = diceSet.getAverages()
row.red.config(text=str(redAvg))
row.yellow.config(text=str(yellowAvg))
row.combined.config(text=str(combined))
def updateNewGraph(self, val=None):
graphType = self.graphType.get()
if graphType == "Combined":
self.updateGraph(self.updateCombinedGraph)
elif graphType == "Separate":
self.updateGraph(self.updateSeparatedGraph)
elif graphType == "Cycle Players":
self.updateGraph(self.updateCombinedGraph, self.curPlayer)
else:
self.updateGraph(self.updateCombinedGraph, self.getPlayer(graphType))
if self.dice.numRolls == 0:
self.combinedGraph.set_ylim(0, 1)
self.redGraph.set_ylim(0, 1)
self.yellowGraph.set_ylim(0, 1)
self.canvas.draw()
def updateLoadGraph(self, val=None):
graphType = self.graphType.get()
if graphType == "All":
self.updateGraph(self.updateGroupsGraph)
else:
self.updateGraph(self.updateSeparatedGraph, self.groups[graphType])
self.canvas.draw()
def updateGraph(self, graphFunction, *args):
self.combinedGraph.clear()
self.redGraph.clear()
self.yellowGraph.clear()
graphFunction(*args)
self.redGraph.set_xlabel("Red Die")
self.yellowGraph.set_xlabel("Yellow Die")
self.combinedGraph.yaxis.set_major_locator(ticker.MaxNLocator(integer=True, nbins=5))
self.redGraph.yaxis.set_major_locator(ticker.MaxNLocator(integer=True, nbins=5))
self.yellowGraph.yaxis.set_major_locator(ticker.MaxNLocator(integer=True, nbins=5))
def updateCombinedGraph(self, player=None):
red, yellow, combined = self.getGraphData(player)
self.combinedGraph.bar(range(2, 13), combined, color="blue", edgecolor="black", tick_label=range(2, 13))
self.redGraph.bar(range(1, 7), red, color="red", edgecolor="black", tick_label=range(1, 7))
self.yellowGraph.bar(range(1, 7), yellow, color="yellow", edgecolor="black", tick_label=range(1, 7))
def updateSeparatedGraph(self, group=None):
prevCombined = prevRed = prevYellow = 0
players = group.players if group else self.players
numRolls = group.dice.numRolls if group else self.dice.numRolls
for player in players:
red, yellow, combined = self.getGraphData(player, numRolls)
self.combinedGraph.bar(range(2, 13), combined, edgecolor="black", tick_label=range(2, 13), bottom=prevCombined)
self.redGraph.bar(range(1, 7), red, edgecolor="black", tick_label=range(1, 7), bottom=prevRed)
self.yellowGraph.bar(range(1, 7), yellow, edgecolor="black", tick_label=range(1, 7), bottom=prevYellow)
prevCombined = numpy.add(prevCombined, combined).tolist()
prevRed = numpy.add(prevRed, red).tolist()
prevYellow = numpy.add(prevYellow, yellow).tolist()
self.combinedGraph.legend([p.name for p in players], ncol=min(len(players), 4),
loc="lower center", bbox_to_anchor=(0.5, 0.95))
def updateGroupsGraph(self):
barWidth = 0.9 / len(self.groups)
offset = 0
groupNames = []
for groupName in self.groups:
group = self.groups[groupName]
red, yellow, combined = group.getRolls() if self.graphDisplay.get() == "Number" else group.getPercentages()
self.combinedGraph.bar(numpy.array(range(2, 13)) + offset, combined, edgecolor="black", tick_label=range(2, 13), width=barWidth)
self.redGraph.bar(numpy.array(range(1, 7)) + offset, red, edgecolor="black", tick_label=range(1, 7), width=barWidth)
self.yellowGraph.bar(numpy.array(range(1, 7)) + offset, yellow, edgecolor="black", tick_label=range(1, 7), width=barWidth)
offset += barWidth
groupNames.append(groupName)
self.combinedGraph.legend(groupNames, ncol=min(len(self.groups), 4),
loc="lower center", bbox_to_anchor=(0.5, 0.95))
def getGraphData(self, player=None, numRolls=None):
graphDisplay = self.graphDisplay.get()
if graphDisplay == "Number":
return player.getRolls() if player else self.dice.getRolls()
else:
return player.getPercentages(numRolls) if player else self.dice.getPercentages()
def getPlayer(self, name):
for player in self.controller.players:
if player.name == name:
return player
return None
def endTurn(self):
self.curPlayer = self.curPlayer.next
self.turnName.config(text=self.curPlayer.name)
self.rollNum.config(text="ROLL " + str(self.dice.numRolls))
def end(self, oldFrame, container):
oldFrame.destroy()
resultsFrame = tk.Frame(container)
resultsFrame.pack()
resultsOptions = ["All Players"]
resultsOptions.extend([p.name for p in self.controller.players])
resultsMenu = tk.OptionMenu(resultsFrame, self.resultsType, *resultsOptions, command=self.changeNewResults)
resultsMenu.grid(row=0, column=0, columnspan=3)
resultsMenu.config(font=TEXT_FONT, width=12, anchor="w")
resultsMenu['menu'].config(font=TEXT_FONT)
self.results = tk.Canvas(resultsFrame, relief="ridge", bd=2, width=self.controller.WIDTH*2/5, height=self.controller.HEIGHT/2)
self.results.grid(row=1, column=0, columnspan=3, pady=(0, 20))
self.results.create_text((self.controller.WIDTH/5,0), anchor="n", font=TEXT_FONT, text=self.dice.testDice())
self.saveButton = tk.Button(resultsFrame, text="Save", font=TEXT_FONT, width=10, relief="groove", bd=2, command=self.save)
self.saveButton.grid(row=2, column=0)
tk.Button(resultsFrame, text="Compare", font=TEXT_FONT, width=10, relief="groove", bd=2, state="disabled", command=self.compare
).grid(row=2, column=1)
self.emailButton = tk.Button(resultsFrame, text="Email Results", font=TEXT_FONT, width=10, relief="groove", bd=2, command=self.tryEmailResults)
self.emailButton.grid(row=2, column=2)
self.errorMessage = tk.Label(resultsFrame, text="", font=TEXT_FONT, fg="red")
self.errorMessage.grid(row=3, column=0, columnspan=3)
def changeNewResults(self, val):
self.results.delete("all")
if val == "All Players":
resultsText = self.dice.testDice()
else:
player = self.getPlayer(val)
resultsText = player.testDice()
self.results.create_text((self.controller.WIDTH/5,0), anchor="n", font=TEXT_FONT, text=resultsText)
def changeLoadResults(self, val):
self.results.delete("all")
resultsText = self.groups[val].testDice()
self.results.create_text((self.controller.WIDTH/5,0), anchor="n", font=TEXT_FONT, text=resultsText)
def save(self):
f = self.getFile()
self.filename = f.name
results = self.toDict()
json.dump(results, f)
self.saveButton.config(text="Saved", state="disabled")
def getFile(self):
now = datetime.now()
baseFilename = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
i = 1
filename = baseFilename + ".txt"
path = "data/" + self.controller.diceName + "/"
while os.path.exists(path + filename):
filename = baseFilename + "_" + str(i) + ".txt"
i += 1
return file(path + filename, 'w')
def compare(self):
pass
def tryEmailResults(self):
try:
self.emailResults()
except (socket.gaierror, smtplib.SMTPServerDisconnected):
self.displayError("No Internet connection.")
def emailResults(self):
mailserver = smtplib.SMTP(config.HOST, 587, timeout=5)
mailserver.ehlo()
mailserver.starttls()
mailserver.ehlo()
mailserver.login(config.ADDRESS, config.PASSWORD)
addresses = [player.email for player in self.controller.players]
gameName = self.controller.diceName
attachment = MIMEText(json.dumps(self.toDict()))
attachment.add_header('Content-Disposition', 'attachment', filename=gameName)
msg = MIMEMultipart('alternative')
msg.attach(attachment)
msg['Subject'] = 'Dice results - ' + gameName
msg['From'] = config.ADDRESS
msg['To'] = ",".join(addresses)
mailserver.sendmail(config.ADDRESS, addresses, msg.as_string())
mailserver.close()
self.emailButton.config(text="Sent", state="disabled")
def displayError(self, msg):
self.errorMessage.config(text=msg)
self.errorMessage.after(5000, self.clearError)
def clearError(self):
self.errorMessage.config(text="")
| gpl-3.0 |
khkaminska/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
marcocaccin/Glass_Cycle_Network | src/fundef.py | 1 | 2428 | import numpy as np
from ase.io import read as aseread
from matscipy.neighbours import neighbour_list
import networkx as nx
from sys import argv
import itertools
import pandas as pd
def atoms_to_nxgraph(atoms, cutoff):
ni, nj = neighbour_list('ij', atoms, cutoff)
adjacency_matrix = np.zeros((len(atoms), len(atoms))).astype(np.int)
for i, j in zip (ni, nj):
adjacency_matrix[i,j] = 1
graph = nx.from_numpy_matrix(np.array(adjacency_matrix))
return graph
def minimal_cycles(graph, cutoff=9):
all_cycles = []
for node in graph.nodes():
# Avoid A-B-A cycles with len(p) > 3. Avoid large non-minimal cycles with cutoff=9
cycles = [set(p) for p in nx.algorithms.all_simple_paths(graph, node, node, cutoff=cutoff) if len(p) > 3]
for cycle in cycles:
if cycle not in all_cycles:
all_cycles.append(cycle)
# purge non minimal cycles and non-cycles
for c0 in [cycle for cycle in all_cycles if len(cycle) > 6]:
sub = nx.Graph(graph.subgraph(list(c0)))
if sub.number_of_edges() != sub.number_of_nodes():
all_cycles.remove(c0)
return all_cycles
def cycle_dual_graph(all_cycles):
# create the network of connected cycles
cycle_adjacency = np.zeros((len(all_cycles), len(all_cycles))).astype(np.int)
for i, ci in enumerate(all_cycles):
for j, cj in enumerate(all_cycles):
if j > i:
if len(ci.intersection(cj)) > 0:
cycle_adjacency[i,j] = 1
cycle_adjacency += cycle_adjacency.T
# create the dual network: a node is a minimal ring, an edge is a shared edge between two rings (e.g. an O atom in the real system)
graph_dual = nx.from_numpy_matrix(cycle_adjacency)
return graph_dual
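# Note: the helper below is an illustrative sketch added for clarity and is
# not part of the original module. It shows how the functions above compose:
# read a structure, build the bond graph, extract the minimal rings, then
# build the ring-adjacency ("dual") graph. The default cutoff of 3.5 is an
# assumed example value, not one taken from this code.
def rings_and_dual_from_file(filename, cutoff=3.5):
    # read the atomic structure with ASE (aseread is imported at the top of this file)
    atoms = aseread(filename)
    # neighbour graph -> minimal rings -> ring-adjacency graph
    graph = atoms_to_nxgraph(atoms, cutoff)
    all_cycles = minimal_cycles(graph)
    graph_dual = cycle_dual_graph(all_cycles)
    return all_cycles, graph_dual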
def get_angle_distribution(at, cutoff=1.98):
from matscipy.neighbours import neighbour_list
from itertools import combinations
ii, jj, DD = neighbour_list('ijD', at, cutoff)
angles = []
for atom in at:
if atom.number == 8:
neighs = np.where(ii == atom.index)[0]
# Si_1, Si_2 = jj[neighs]
# D_1, D_2 = DD[neighs]
# print(np.linalg.norm(D_2 - D_1))
# print(Si_1, Si_2)
for Si_1, Si_2 in combinations(jj[neighs], 2):
angle = at.get_angle([Si_1, atom.index, Si_2])
angles.append(angle)
return np.array(angles) | gpl-2.0 |
jstoxrocky/statsmodels | statsmodels/tsa/statespace/tests/test_kalman.py | 6 | 22474 | """
Tests for _statespace module
Author: Chad Fulton
License: Simplified-BSD
References
----------
Kim, Chang-Jin, and Charles R. Nelson. 1999.
"State-Space Models with Regime Switching:
Classical and Gibbs-Sampling Approaches with Applications".
MIT Press Books. The MIT Press.
Hamilton, James D. 1994.
Time Series Analysis.
Princeton, N.J.: Princeton University Press.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
try:
from scipy.linalg.blas import find_best_blas_type
except ImportError:
# Shim for SciPy 0.11, derived from tag=0.11 scipy.linalg.blas
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z', 'G': 'z'}
def find_best_blas_type(arrays):
dtype, index = max(
[(ar.dtype, i) for i, ar in enumerate(arrays)])
prefix = _type_conv.get(dtype.char, 'd')
return (prefix, dtype, None)
from statsmodels.tsa.statespace import _statespace as ss
from .results import results_kalman_filter
from numpy.testing import assert_almost_equal, assert_allclose
from nose.exc import SkipTest
prefix_statespace_map = {
's': ss.sStatespace, 'd': ss.dStatespace,
'c': ss.cStatespace, 'z': ss.zStatespace
}
prefix_kalman_filter_map = {
's': ss.sKalmanFilter, 'd': ss.dKalmanFilter,
'c': ss.cKalmanFilter, 'z': ss.zKalmanFilter
}
current_path = os.path.dirname(os.path.abspath(__file__))
class Clark1987(object):
"""
Clark's (1987) univariate unobserved components model of real GDP (as
presented in Kim and Nelson, 1999)
Test data produced using GAUSS code described in Kim and Nelson (1999) and
found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm
See `results.results_kalman_filter` for more information.
"""
def __init__(self, dtype=float, conserve_memory=0, loglikelihood_burn=0):
self.true = results_kalman_filter.uc_uni
self.true_states = pd.DataFrame(self.true['states'])
# GDP, Quarterly, 1947.1 - 1995.3
data = pd.DataFrame(
self.true['data'],
index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
columns=['GDP']
)
data['lgdp'] = np.log(data['GDP'])
# Parameters
self.conserve_memory = conserve_memory
self.loglikelihood_burn = loglikelihood_burn
# Observed data
self.obs = np.array(data['lgdp'], ndmin=2, dtype=dtype, order="F")
# Measurement equation
self.k_endog = k_endog = 1 # dimension of observed data
# design matrix
self.design = np.zeros((k_endog, 4, 1), dtype=dtype, order="F")
self.design[:, :, 0] = [1, 1, 0, 0]
# observation intercept
self.obs_intercept = np.zeros((k_endog, 1), dtype=dtype, order="F")
# observation covariance matrix
self.obs_cov = np.zeros((k_endog, k_endog, 1), dtype=dtype, order="F")
# Transition equation
self.k_states = k_states = 4 # dimension of state space
# transition matrix
self.transition = np.zeros((k_states, k_states, 1),
dtype=dtype, order="F")
self.transition[([0, 0, 1, 1, 2, 3],
[0, 3, 1, 2, 1, 3],
[0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1]
# state intercept
self.state_intercept = np.zeros((k_states, 1), dtype=dtype, order="F")
# selection matrix
self.selection = np.asfortranarray(np.eye(k_states)[:, :, None],
dtype=dtype)
# state covariance matrix
self.state_cov = np.zeros((k_states, k_states, 1),
dtype=dtype, order="F")
# Initialization: Diffuse priors
self.initial_state = np.zeros((k_states,), dtype=dtype, order="F")
self.initial_state_cov = np.asfortranarray(np.eye(k_states)*100,
dtype=dtype)
# Update matrices with given parameters
(sigma_v, sigma_e, sigma_w, phi_1, phi_2) = np.array(
self.true['parameters'], dtype=dtype
)
self.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
self.state_cov[
np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
sigma_v**2, sigma_e**2, 0, sigma_w**2
]
# Initialization: modification
# Due to the difference in the way Kim and Nelson (1999) and Durbin
# and Koopman (2012) define the order of the Kalman filter routines,
# we need to modify the initial state covariance matrix to match
# Kim and Nelson's results, since the *Statespace models follow Durbin
# and Koopman.
self.initial_state_cov = np.asfortranarray(
np.dot(
np.dot(self.transition[:, :, 0], self.initial_state_cov),
self.transition[:, :, 0].T
)
)
def init_filter(self):
# Use the appropriate Statespace model
prefix = find_best_blas_type((self.obs,))
cls = prefix_statespace_map[prefix[0]]
# Instantiate the statespace model
self.model = cls(
self.obs, self.design, self.obs_intercept, self.obs_cov,
self.transition, self.state_intercept, self.selection,
self.state_cov
)
self.model.initialize_known(self.initial_state, self.initial_state_cov)
# Initialize the appropriate Kalman filter
cls = prefix_kalman_filter_map[prefix[0]]
self.filter = cls(self.model, conserve_memory=self.conserve_memory,
loglikelihood_burn=self.loglikelihood_burn)
def run_filter(self):
# Filter the data
self.filter()
# Get results
self.result = {
'loglike': lambda burn: np.sum(self.filter.loglikelihood[burn:]),
'state': np.array(self.filter.filtered_state),
}
def test_loglike(self):
assert_almost_equal(
self.result['loglike'](self.true['start']), self.true['loglike'], 5
)
def test_filtered_state(self):
assert_almost_equal(
self.result['state'][0][self.true['start']:],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.result['state'][1][self.true['start']:],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.result['state'][3][self.true['start']:],
self.true_states.iloc[:, 2], 4
)
class TestClark1987Single(Clark1987):
"""
Basic single precision test for the loglikelihood and filtered states.
"""
def __init__(self):
raise SkipTest('Not implemented')
super(TestClark1987Single, self).__init__(
dtype=np.float32, conserve_memory=0
)
self.init_filter()
self.run_filter()
def test_loglike(self):
assert_allclose(
self.result['loglike'](self.true['start']), self.true['loglike'],
rtol=1e-3
)
def test_filtered_state(self):
assert_allclose(
self.result['state'][0][self.true['start']:],
self.true_states.iloc[:, 0],
atol=1e-2
)
assert_allclose(
self.result['state'][1][self.true['start']:],
self.true_states.iloc[:, 1],
atol=1e-2
)
assert_allclose(
self.result['state'][3][self.true['start']:],
self.true_states.iloc[:, 2],
atol=1e-2
)
class TestClark1987Double(Clark1987):
"""
Basic double precision test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987Double, self).__init__(
dtype=float, conserve_memory=0
)
self.init_filter()
self.run_filter()
class TestClark1987SingleComplex(Clark1987):
"""
Basic single precision complex test for the loglikelihood and filtered
states.
"""
def __init__(self):
raise SkipTest('Not implemented')
super(TestClark1987SingleComplex, self).__init__(
dtype=np.complex64, conserve_memory=0
)
self.init_filter()
self.run_filter()
def test_loglike(self):
assert_allclose(
self.result['loglike'](self.true['start']), self.true['loglike'],
rtol=1e-3
)
def test_filtered_state(self):
assert_allclose(
self.result['state'][0][self.true['start']:],
self.true_states.iloc[:, 0],
atol=1e-2
)
assert_allclose(
self.result['state'][1][self.true['start']:],
self.true_states.iloc[:, 1],
atol=1e-2
)
assert_allclose(
self.result['state'][3][self.true['start']:],
self.true_states.iloc[:, 2],
atol=1e-2
)
class TestClark1987DoubleComplex(Clark1987):
"""
Basic double precision complex test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987DoubleComplex, self).__init__(
dtype=complex, conserve_memory=0
)
self.init_filter()
self.run_filter()
class TestClark1987Conserve(Clark1987):
"""
Memory conservation test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987Conserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.init_filter()
self.run_filter()
class Clark1987Forecast(Clark1987):
"""
Forecasting test for the loglikelihood and filtered states.
"""
def __init__(self, dtype=float, nforecast=100, conserve_memory=0):
super(Clark1987Forecast, self).__init__(
dtype, conserve_memory
)
self.nforecast = nforecast
# Add missing observations to the end (to forecast)
self._obs = self.obs
self.obs = np.array(np.r_[self.obs[0, :], [np.nan]*nforecast],
ndmin=2, dtype=dtype, order="F")
def test_filtered_state(self):
assert_almost_equal(
self.result['state'][0][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.result['state'][1][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.result['state'][3][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 2], 4
)
class TestClark1987ForecastDouble(Clark1987Forecast):
"""
Basic double forecasting test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987ForecastDouble, self).__init__()
self.init_filter()
self.run_filter()
class TestClark1987ForecastDoubleComplex(Clark1987Forecast):
"""
Basic double complex forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987ForecastDoubleComplex, self).__init__(
dtype=complex
)
self.init_filter()
self.run_filter()
class TestClark1987ForecastConserve(Clark1987Forecast):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987ForecastConserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.init_filter()
self.run_filter()
class TestClark1987ConserveAll(Clark1987):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987ConserveAll, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02 | 0x04 | 0x08
)
self.loglikelihood_burn = self.true['start']
self.init_filter()
self.run_filter()
def test_loglike(self):
assert_almost_equal(
self.result['loglike'](0), self.true['loglike'], 5
)
def test_filtered_state(self):
end = self.true_states.shape[0]
assert_almost_equal(
self.result['state'][0][-1],
self.true_states.iloc[end-1, 0], 4
)
assert_almost_equal(
self.result['state'][1][-1],
self.true_states.iloc[end-1, 1], 4
)
class Clark1989(object):
"""
Clark's (1989) bivariate unobserved components model of real GDP (as
presented in Kim and Nelson, 1999)
Tests two-dimensional observation data.
Test data produced using GAUSS code described in Kim and Nelson (1999) and
found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm
See `results.results_kalman_filter` for more information.
"""
def __init__(self, dtype=float, conserve_memory=0, loglikelihood_burn=0):
self.true = results_kalman_filter.uc_bi
self.true_states = pd.DataFrame(self.true['states'])
# GDP and Unemployment, Quarterly, 1948.1 - 1995.3
data = pd.DataFrame(
self.true['data'],
index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
columns=['GDP', 'UNEMP']
)[4:]
data['GDP'] = np.log(data['GDP'])
data['UNEMP'] = (data['UNEMP']/100)
# Observed data
self.obs = np.array(data, ndmin=2, dtype=dtype, order="C").T
# Parameters
self.k_endog = k_endog = 2 # dimension of observed data
self.k_states = k_states = 6 # dimension of state space
self.conserve_memory = conserve_memory
self.loglikelihood_burn = loglikelihood_burn
# Measurement equation
# design matrix
self.design = np.zeros((k_endog, k_states, 1), dtype=dtype, order="F")
self.design[:, :, 0] = [[1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1]]
# observation intercept
self.obs_intercept = np.zeros((k_endog, 1), dtype=dtype, order="F")
# observation covariance matrix
self.obs_cov = np.zeros((k_endog, k_endog, 1), dtype=dtype, order="F")
# Transition equation
# transition matrix
self.transition = np.zeros((k_states, k_states, 1),
dtype=dtype, order="F")
self.transition[([0, 0, 1, 1, 2, 3, 4, 5],
[0, 4, 1, 2, 1, 2, 4, 5],
[0, 0, 0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1, 1, 1]
# state intercept
self.state_intercept = np.zeros((k_states, 1), dtype=dtype, order="F")
# selection matrix
self.selection = np.asfortranarray(np.eye(k_states)[:, :, None],
dtype=dtype)
# state covariance matrix
self.state_cov = np.zeros((k_states, k_states, 1),
dtype=dtype, order="F")
# Initialization: Diffuse priors
self.initial_state = np.zeros((k_states,), dtype=dtype)
self.initial_state_cov = np.asfortranarray(np.eye(k_states)*100,
dtype=dtype)
# Update matrices with given parameters
(sigma_v, sigma_e, sigma_w, sigma_vl, sigma_ec,
phi_1, phi_2, alpha_1, alpha_2, alpha_3) = np.array(
self.true['parameters'], dtype=dtype
)
self.design[([1, 1, 1], [1, 2, 3], [0, 0, 0])] = [
alpha_1, alpha_2, alpha_3
]
self.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
self.obs_cov[1, 1, 0] = sigma_ec**2
self.state_cov[
np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
sigma_v**2, sigma_e**2, 0, 0, sigma_w**2, sigma_vl**2
]
# Initialization: modification
        # Due to the difference in the way Kim and Nelson (1999) and Durbin
# and Koopman (2012) define the order of the Kalman filter routines,
# we need to modify the initial state covariance matrix to match
# Kim and Nelson's results, since the *Statespace models follow Durbin
# and Koopman.
self.initial_state_cov = np.asfortranarray(
np.dot(
np.dot(self.transition[:, :, 0], self.initial_state_cov),
self.transition[:, :, 0].T
)
)
def init_filter(self):
# Use the appropriate Statespace model
prefix = find_best_blas_type((self.obs,))
cls = prefix_statespace_map[prefix[0]]
# Instantiate the statespace model
self.model = cls(
self.obs, self.design, self.obs_intercept, self.obs_cov,
self.transition, self.state_intercept, self.selection,
self.state_cov
)
self.model.initialize_known(self.initial_state, self.initial_state_cov)
# Initialize the appropriate Kalman filter
cls = prefix_kalman_filter_map[prefix[0]]
self.filter = cls(self.model, conserve_memory=self.conserve_memory,
loglikelihood_burn=self.loglikelihood_burn)
def run_filter(self):
# Filter the data
self.filter()
# Get results
self.result = {
'loglike': lambda burn: np.sum(self.filter.loglikelihood[burn:]),
'state': np.array(self.filter.filtered_state),
}
def test_loglike(self):
assert_almost_equal(
# self.result['loglike'](self.true['start']),
self.result['loglike'](0),
self.true['loglike'], 2
)
def test_filtered_state(self):
assert_almost_equal(
self.result['state'][0][self.true['start']:],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.result['state'][1][self.true['start']:],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.result['state'][4][self.true['start']:],
self.true_states.iloc[:, 2], 4
)
assert_almost_equal(
self.result['state'][5][self.true['start']:],
self.true_states.iloc[:, 3], 4
)
class TestClark1989(Clark1989):
"""
Basic double precision test for the loglikelihood and filtered
states with two-dimensional observation vector.
"""
def __init__(self):
super(TestClark1989, self).__init__(dtype=float, conserve_memory=0)
self.init_filter()
self.run_filter()
class TestClark1989Conserve(Clark1989):
"""
Memory conservation test for the loglikelihood and filtered states with
two-dimensional observation vector.
"""
def __init__(self):
super(TestClark1989Conserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.init_filter()
self.run_filter()
class Clark1989Forecast(Clark1989):
"""
Memory conservation test for the loglikelihood and filtered states with
two-dimensional observation vector.
"""
def __init__(self, dtype=float, nforecast=100, conserve_memory=0):
super(Clark1989Forecast, self).__init__(dtype, conserve_memory)
self.nforecast = nforecast
# Add missing observations to the end (to forecast)
self._obs = self.obs
self.obs = np.array(
np.c_[
self._obs,
np.r_[[np.nan, np.nan]*nforecast].reshape(2, nforecast)
],
ndmin=2, dtype=dtype, order="F"
)
self.init_filter()
self.run_filter()
def test_filtered_state(self):
assert_almost_equal(
self.result['state'][0][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.result['state'][1][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.result['state'][4][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 2], 4
)
assert_almost_equal(
self.result['state'][5][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 3], 4
)
class TestClark1989ForecastDouble(Clark1989Forecast):
"""
Basic double forecasting test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1989ForecastDouble, self).__init__()
self.init_filter()
self.run_filter()
class TestClark1989ForecastDoubleComplex(Clark1989Forecast):
"""
Basic double complex forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1989ForecastDoubleComplex, self).__init__(
dtype=complex
)
self.init_filter()
self.run_filter()
class TestClark1989ForecastConserve(Clark1989Forecast):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1989ForecastConserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.init_filter()
self.run_filter()
class TestClark1989ConserveAll(Clark1989):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1989ConserveAll, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02 | 0x04 | 0x08,
)
# self.loglikelihood_burn = self.true['start']
self.loglikelihood_burn = 0
self.init_filter()
self.run_filter()
def test_loglike(self):
assert_almost_equal(
self.result['loglike'](0), self.true['loglike'], 2
)
def test_filtered_state(self):
end = self.true_states.shape[0]
assert_almost_equal(
self.result['state'][0][-1],
self.true_states.iloc[end-1, 0], 4
)
assert_almost_equal(
self.result['state'][1][-1],
self.true_states.iloc[end-1, 1], 4
)
assert_almost_equal(
self.result['state'][4][-1],
self.true_states.iloc[end-1, 2], 4
)
assert_almost_equal(
self.result['state'][5][-1],
self.true_states.iloc[end-1, 3], 4
) | bsd-3-clause |
ericmjl/bokeh | bokeh/core/json_encoder.py | 1 | 9061 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a functions and classes to implement a custom JSON encoder for
serializing objects for BokehJS.
The primary interface is provided by the |serialize_json| function, which
uses the custom |BokehJSONEncoder| to produce JSON output.
In general, functions in this module convert values in the following way:
* Datetime values (Python, Pandas, NumPy) are converted to floating point
milliseconds since epoch.
* TimeDelta values are converted to absolute floating point milliseconds.
* RelativeDelta values are converted to dictionaries.
* Decimal values are converted to floating point.
* Sequences (Pandas Series, NumPy arrays, python sequences) that are passed
  through this interface are converted to lists. Note, however, that arrays in
data sources inside Bokeh Documents are converted elsewhere, and by default
use a binary encoded format.
* Bokeh ``Model`` instances are usually serialized elsewhere in the context
  of an entire Bokeh Document. Models passed through this interface are
converted to references.
* ``HasProps`` (that are not Bokeh models) are converted to key/value dicts of
all their properties and values.
* ``Color`` instances are converted to CSS color values.
.. |serialize_json| replace:: :class:`~bokeh.core.json_encoder.serialize_json`
.. |BokehJSONEncoder| replace:: :class:`~bokeh.core.json_encoder.BokehJSONEncoder`
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import collections
import datetime as dt
import decimal
import json
# External imports
import numpy as np
# Bokeh imports
from ..settings import settings
from ..util.dependencies import import_optional
from ..util.serialization import (
convert_datetime_type,
convert_timedelta_type,
is_datetime_type,
is_timedelta_type,
transform_array,
transform_series,
)
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
rd = import_optional("dateutil.relativedelta")
pd = import_optional('pandas')
__all__ = (
'BokehJSONEncoder',
'serialize_json',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def serialize_json(obj, pretty=None, indent=None, **kwargs):
''' Return a serialized JSON representation of objects, suitable to
send to BokehJS.
This function is typically used to serialize single python objects in
the manner expected by BokehJS. In particular, many datetime values are
automatically normalized to an expected format. Some Bokeh objects can
also be passed, but note that Bokeh models are typically properly
serialized in the context of an entire Bokeh document.
    The resulting JSON always has sorted keys. By default, the output is
as compact as possible unless pretty output or indentation is requested.
Args:
obj (obj) : the object to serialize to JSON format
pretty (bool, optional) :
Whether to generate prettified output. If ``True``, spaces are
            added after separators, and indentation and newlines
are applied. (default: False)
Pretty output can also be enabled with the environment variable
``BOKEH_PRETTY``, which overrides this argument, if set.
indent (int or None, optional) :
Amount of indentation to use in generated JSON output. If ``None``
then no indentation is used, unless pretty output is enabled,
in which case two spaces are used. (default: None)
Any additional keyword arguments are passed to ``json.dumps``, except for
some that are computed internally, and cannot be overridden:
* allow_nan
* indent
* separators
* sort_keys
Examples:
.. code-block:: python
>>> data = dict(b=np.datetime64('2017-01-01'), a = np.arange(3))
            >>> print(serialize_json(data))
{"a":[0,1,2],"b":1483228800000.0}
>>> print(serialize_json(data, pretty=True))
{
"a": [
0,
1,
2
],
"b": 1483228800000.0
}
'''
# these args to json.dumps are computed internally and should not be passed along
for name in ['allow_nan', 'separators', 'sort_keys']:
if name in kwargs:
raise ValueError("The value of %r is computed internally, overriding is not permissible." % name)
pretty = settings.pretty(pretty)
if pretty:
separators=(",", ": ")
else:
separators=(",", ":")
if pretty and indent is None:
indent = 2
return json.dumps(obj, cls=BokehJSONEncoder, allow_nan=False, indent=indent, separators=separators, sort_keys=True, **kwargs)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class BokehJSONEncoder(json.JSONEncoder):
''' A custom ``json.JSONEncoder`` subclass for encoding objects in
accordance with the BokehJS protocol.
'''
def transform_python_types(self, obj):
''' Handle special scalars such as (Python, NumPy, or Pandas)
datetimes, or Decimal values.
Args:
obj (obj) :
The object to encode. Anything not specifically handled in
this method is passed on to the default system JSON encoder.
'''
# date/time values that get serialized as milliseconds
if is_datetime_type(obj):
return convert_datetime_type(obj)
if is_timedelta_type(obj):
return convert_timedelta_type(obj)
# Date
if isinstance(obj, dt.date):
return obj.isoformat()
# slice objects
elif isinstance(obj, slice):
return dict(start=obj.start, stop=obj.stop, step=obj.step)
# NumPy scalars
elif np.issubdtype(type(obj), np.floating):
return float(obj)
elif np.issubdtype(type(obj), np.integer):
return int(obj)
elif np.issubdtype(type(obj), np.bool_):
return bool(obj)
# Decimal values
elif isinstance(obj, decimal.Decimal):
return float(obj)
# RelativeDelta gets serialized as a dict
elif rd and isinstance(obj, rd.relativedelta):
return dict(years=obj.years,
months=obj.months,
days=obj.days,
hours=obj.hours,
minutes=obj.minutes,
seconds=obj.seconds,
microseconds=obj.microseconds)
else:
return super().default(obj)
def default(self, obj):
''' The required ``default`` method for ``JSONEncoder`` subclasses.
Args:
obj (obj) :
The object to encode. Anything not specifically handled in
this method is passed on to the default system JSON encoder.
'''
from ..model import Model
from ..colors import Color
from .has_props import HasProps
# array types -- use force_list here, only binary
# encoding CDS columns for now
if pd and isinstance(obj, (pd.Series, pd.Index)):
return transform_series(obj, force_list=True)
elif isinstance(obj, np.ndarray):
return transform_array(obj, force_list=True)
elif isinstance(obj, collections.deque):
return list(map(self.default, obj))
elif isinstance(obj, Model):
return obj.ref
elif isinstance(obj, HasProps):
return obj.properties_with_values(include_defaults=False)
elif isinstance(obj, Color):
return obj.to_css()
else:
return self.transform_python_types(obj)
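    # Dispatch sketch (hypothetical value): ``default`` is only invoked for
    # objects the stock encoder cannot serialize, and unhandled scalars fall
    # back to ``transform_python_types``, so on Python 3
    #
    #     json.dumps(np.int64(3), cls=BokehJSONEncoder)
    #
    # is expected to yield the string "3".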
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
wbyne/QGIS | python/plugins/processing/algs/qgis/BarPlot.py | 7 | 3278 | # -*- coding: utf-8 -*-
"""
***************************************************************************
BarPlot.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
import numpy as np
from processing.core.parameters import ParameterTable
from processing.core.parameters import ParameterTableField
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.outputs import OutputHTML
from processing.tools import vector
from processing.tools import dataobjects
class BarPlot(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
NAME_FIELD = 'NAME_FIELD'
VALUE_FIELD = 'VALUE_FIELD'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Bar plot')
self.group, self.i18n_group = self.trAlgorithm('Graphics')
self.addParameter(ParameterTable(self.INPUT, self.tr('Input table')))
self.addParameter(ParameterTableField(self.NAME_FIELD,
self.tr('Category name field'),
self.INPUT,
ParameterTableField.DATA_TYPE_NUMBER))
self.addParameter(ParameterTableField(self.VALUE_FIELD,
self.tr('Value field'),
self.INPUT,
ParameterTableField.DATA_TYPE_NUMBER))
self.addOutput(OutputHTML(self.OUTPUT, self.tr('Bar plot')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
namefieldname = self.getParameterValue(self.NAME_FIELD)
valuefieldname = self.getParameterValue(self.VALUE_FIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.values(layer, namefieldname, valuefieldname)
plt.close()
ind = np.arange(len(values[namefieldname]))
width = 0.8
plt.bar(ind, values[valuefieldname], width, color='r')
plt.xticks(ind, values[namefieldname], rotation=45)
plotFilename = output + '.png'
lab.savefig(plotFilename)
f = open(output, 'w')
f.write('<html><img src="' + plotFilename + '"/></html>')
f.close()
| gpl-2.0 |
abitofalchemy/hrg_nets | sa_inf_mirror.py | 1 | 7806 | import math
import re
import argparse
import traceback
import sys, os
import david as pcfg
import networkx as nx
import pandas as pd
import graph_sampler as gs
import product
import tw_karate_chop as tw
from cikm_experiments import kronfit
from gg import binarize, graph_checks, graphical_degree_sequence, grow
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
plt.style.use('ggplot')
__authors__ = 'saguinag,tweninge,dchiang'
__contact__ = '{authors}@nd.edu'
__version__ = "0.1.0"
# sa_inf_mirror.py
# VersionLog:
# 0.1.0 Initial state;
prod_rules = {}
debug = False
dbg = False
def degree_probabilility_distribution(orig_g, mGraphs, gname):
with open('../Results/inf_deg_dist_{}_{}.txt'.format(gname,nick), 'w') as f:
d = orig_g.degree()
n = orig_g.number_of_nodes()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby([1]).count()
gb['pk'] = gb[0]/float(n)
f.write('# original graph degree prob distr\n')
for row in gb['pk'].iteritems():
f.write('({}, {})\n'.format(row[0],row[1]))
mdf = pd.DataFrame()
for i, hstar in enumerate(mGraphs):
d = hstar.degree()
df = pd.DataFrame.from_dict(d.items())
mdf = pd.concat([mdf, df])
mgb = mdf.groupby([1]).count()
mgb['pk'] = mgb[0]/float(n)/float(len(mGraphs))
f.write('# synth graph {} \n'.format(i))
for row in mgb['pk'].iteritems():
f.write('({}, {})\n'.format(row[0], row[1]))
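# Worked example for the computation above (hypothetical input): for a 4-node
# path graph the degree dict is {0: 1, 1: 2, 2: 2, 3: 1}, so grouping the
# frame by the degree column gives counts {1: 2, 2: 2} and the written
# probabilities are pk = {1: 0.5, 2: 0.5}.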
def learn_grammars_production_rules(input_graph):
G = input_graph
# print G.number_of_nodes()
# print G.number_of_edges()
num_nodes = G.number_of_nodes()
G.remove_edges_from(G.selfloop_edges())
giant_nodes = max(nx.connected_component_subgraphs(G), key=len)
G = nx.subgraph(G, giant_nodes)
graph_checks(G)
if dbg:
print
print "--------------------"
print "-Tree Decomposition-"
print "--------------------"
if num_nodes >= 500:
for Gprime in gs.rwr_sample(G, 2, 100):
T = tw.quickbb(Gprime)
root = list(T)[0]
T = tw.make_rooted(T, root)
T = binarize(T)
root = list(T)[0]
root, children = T
tw.new_visit(T, G, prod_rules)
else:
T = tw.quickbb(G)
root = list(T)[0]
T = tw.make_rooted(T, root)
T = binarize(T)
root = list(T)[0]
root, children = T
tw.new_visit(T, G, prod_rules)
# return
return prod_rules
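# As consumed by PHRG below, prod_rules appears to map an LHS label to a dict
# of RHS strings and their observed counts; the normalization step then turns
# counts into probabilities, e.g. counts {rhs_a: 2, rhs_b: 1} become 2/3 and
# 1/3 (rhs_a and rhs_b are placeholder names).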
def PHRG(G, gname):
n = G.number_of_nodes()
target_nodes = n
# degree_sequence = G.degree().values()
prod_rules = learn_grammars_production_rules(G)
if dbg:
print
print "--------------------"
print "- Production Rules -"
print "--------------------"
for k in prod_rules.iterkeys():
# print k
s = 0
for d in prod_rules[k]:
s += prod_rules[k][d]
for d in prod_rules[k]:
            prod_rules[k][d] = float(prod_rules[k][d]) / float(s) # normalization step to create probs not counts.
# print '\t -> ', d, prod_rules[k][d]
#
rules = []
id = 0
for k, v in prod_rules.iteritems():
sid = 0
for x in prod_rules[k]:
rhs = re.findall("[^()]+", x)
rules.append(("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x]))
# print ("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x])
sid += 1
id += 1
g = pcfg.Grammar('S')
for (id, lhs, rhs, prob) in rules:
g.add_rule(pcfg.Rule(id, lhs, rhs, prob))
print "Starting max size"
g.set_max_size(target_nodes)
print "Done with max size"
rule_list = g.sample(target_nodes)
# PHRG
pred_graph = grow(rule_list, g)[0]
return pred_graph
def CLGM(G, gname):
target_nodes = G.number_of_edges()
z = graphical_degree_sequence(target_nodes,1)
pred_graph = nx.expected_degree_graph(z)
return pred_graph
def KPGM(G, gname):
target_nodes = G.number_of_nodes()
k = int(math.log(target_nodes, 2))
print 'k=', k
# from: i:/data/saguinag/Phoenix/demo_graphs/karate.txt
# karate: P = [[0.9999,0.661],[0.661, 0.01491]]
    # Internet: autonomous systems
# P = [[0.9523, 0.585],[0.585, 0.05644]]
P = kronfit(G)
#print 'kronfit params (matrix):', P
pred_graph = product.kronecker_random_graph(k, P)
for u, v in pred_graph.selfloop_edges():
pred_graph.remove_edge(u, v)
return pred_graph
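# Size sketch for KPGM (illustrative numbers): a 34-node input gives
# k = int(log(34, 2)) = 5, so the 2x2 initiator fitted by kronfit is expected
# to produce a 2**5 = 32-node synthetic graph before self-loops are removed.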
def main(gname_path):
if gname_path is None: return
print gname_path
G = nx.read_edgelist(gname_path)
avg_synth_graphs = []
for i in range(10):
synth_graph = PHRG(G, 'phrg')
avg_synth_graphs.append(synth_graph)
print 'performing deg dist'
degree_probabilility_distribution(G, avg_synth_graphs, gname='phrg')
def average_10th_recursion(gname_path):
if gname_path is None: return
print gname_path
orig_g = nx.read_edgelist(gname_path, comments='%')
phrg_avg_synth_graphs = []
kpgm_avg_synth_graphs = []
clgm_avg_synth_graphs = []
for run in range(10):
print 'run >', run
G = orig_g
for rec in range(10):
synth_graph = PHRG(G, 'phrg_Ten')
G = synth_graph
phrg_avg_synth_graphs.append(synth_graph) # takes the 10th recursion
for rec in range(10):
synth_graph = KPGM(G, 'kpgm_Ten')
G = synth_graph
# end recursion
kpgm_avg_synth_graphs.append(synth_graph) # takes the 10th recursion
for rec in range(10):
synth_graph = CLGM(G, 'clgm_Ten')
G = synth_graph
# end recursion
clgm_avg_synth_graphs.append(synth_graph) # takes the 10th recursion
print 'performing deg dist'
G = nx.read_edgelist(gname_path,comments='%')
degree_probabilility_distribution(G, phrg_avg_synth_graphs, gname='phrg_10th')
degree_probabilility_distribution(G, kpgm_avg_synth_graphs, gname='kpgm_10th')
degree_probabilility_distribution(G, clgm_avg_synth_graphs, gname='clgm_10th')
def average_1st_recursion(gname_path):
if gname_path is None: return
print gname_path
orig_g = nx.read_edgelist(gname_path, comments='%')
phrg_avg_synth_graphs = []
kpgm_avg_synth_graphs = []
clgm_avg_synth_graphs = []
for run in range(10):
print '>', run
G = orig_g
for rec in range(10):
synth_graph = PHRG(G, 'phrg_One')
break
        phrg_avg_synth_graphs.append(synth_graph) # takes the 1st recursion
for rec in range(10):
synth_graph = KPGM(G, 'kpgm_One')
break
# end recursion
        kpgm_avg_synth_graphs.append(synth_graph) # takes the 1st recursion
for rec in range(10):
synth_graph = CLGM(G, 'clgm_One')
break
# end recursion
        clgm_avg_synth_graphs.append(synth_graph) # takes the 1st recursion
print 'performing deg dist'
G = nx.read_edgelist(gname_path, comments='%')
degree_probabilility_distribution(G, phrg_avg_synth_graphs, gname='phrg_1st')
degree_probabilility_distribution(G, kpgm_avg_synth_graphs, gname='kpgm_1st')
degree_probabilility_distribution(G, clgm_avg_synth_graphs, gname='clgm_1st')
print 'Done with: average_1st_recursion'
def get_parser():
parser = argparse.ArgumentParser(description='sa_inf_mirror')
parser.add_argument('graph_path', metavar='GRAPH_PATH', nargs=1, help='the graph name to process')
parser.add_argument('nick_name', metavar='NICK_NAME', nargs=1, help='Nick name for the output file')
parser.add_argument('--version', action='version', version=__version__)
return parser
# ~~~~~~~~~~~~~~~~
# * Main - Begin *
if __name__ == '__main__':
parser = get_parser()
args = vars(parser.parse_args())
global nick
nick = args['nick_name'][0]
try:
# main(args['graph_path'][0])
average_1st_recursion(args['graph_path'][0])
average_10th_recursion(args['graph_path'][0])
except Exception, e:
print 'ERROR, UNEXPECTED SAVE PLOT EXCEPTION'
print str(e)
traceback.print_exc()
os._exit(1)
print 'Done'
sys.exit(0)
| gpl-3.0 |
ankurankan/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 1 | 50480 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
import warnings
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
SPARSE_INTERCEPT_DECAY = 0.01
"""For sparse data intercept updates are scaled by this decay factor to avoid
intercept oscillation."""
DEFAULT_EPSILON = 0.1
"""Default value of ``epsilon`` parameter. """
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=False,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided coef_ does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _make_dataset(X, y_i, sample_weight):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
"""
if sp.issparse(X):
dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
dataset = ArrayDataset(X, y_i, sample_weight)
intercept_decay = 1.0
return dataset, intercept_decay
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=False, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
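        # Sketch of the reduction (hypothetical labels): for classes_ equal to
        # [0, 1, 2], three binary problems are solved; the i-th one relabels y
        # as +1 where y == classes_[i] and -1 elsewhere (see fit_binary), and
        # prediction picks the class with the largest decision value.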
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'auto':
            raise ValueError("class_weight 'auto' is not supported for "
                             "partial_fit. In order to use 'auto' weights, "
                             "use compute_class_weight('auto', classes, y). "
                             "In place of y you can use a large enough sample "
                             "of the full training set target to properly "
                             "estimate the class frequency distributions. "
                             "Pass the resulting weights as the class_weight "
                             "parameter.")
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
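    # Out-of-core usage sketch (hypothetical arrays X0, y0, X1, y1, and an
    # SGDClassifier instance clf): the first call must announce every label,
    # later calls may omit ``classes``, e.g.
    #
    #     clf.partial_fit(X0, y0, classes=np.array([0, 1, 2]))
    #     clf.partial_fit(X1, y1)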
def fit(self, X, y, coef_init=None, intercept_init=None,
class_weight=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
contructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
if class_weight is not None:
warnings.warn("You are trying to set class_weight through the fit "
"method, which will be deprecated in version "
"v0.17 of scikit-learn. Pass the class_weight into "
"the constructor instead.", DeprecationWarning)
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to False.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
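        For example, with eta0=0.1 and power_t=0.5 the 'invscaling'
        schedule decays the step size as eta = 0.1 / sqrt(t).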
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "auto" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "auto" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the coef_ attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=False,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=False, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
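        For instance, a decision value of 0.4 maps to a positive-class
        probability of 0.7.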
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=False,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = y.astype(np.float64)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self.decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = _make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated each sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to False.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the coef_ attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (n_features,)
        Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
`average_coef_` : array, shape (n_features,)
Averaged weights assigned to the features.
`average_intercept_` : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=False, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=False,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
| bsd-3-clause |
aetilley/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
dingocuster/scikit-learn | examples/model_selection/plot_roc_crossval.py | 247 | 3253 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.cross_validation.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
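# Sketch of the averaging performed below: each fold's ROC curve is resampled
# onto the common grid mean_fpr = np.linspace(0, 1, 100) with scipy's interp,
# so curves with different numbers of thresholds can be summed point-wise and
# later divided by the number of folds.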
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
moutai/scikit-learn | examples/tree/plot_iris.py | 86 | 1965 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
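# The decision surface below is rendered by predicting the class of every
# point of a 0.02-spaced np.meshgrid over the two selected features and
# filling the resulting contours with plt.contourf.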
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Geneva_inst_NoRot/Geneva_inst_NoRot_0/fullgrid/peaks_reader.py | 32 | 5021 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ---------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith("1.grd"):
gridfile1 = file
for file in os.listdir('.'):
if file.endswith("2.grd"):
gridfile2 = file
for file in os.listdir('.'):
if file.endswith("3.grd"):
gridfile3 = file
# ------------------------
for file in os.listdir('.'):
if file.endswith("1.txt"):
Elines1 = file
for file in os.listdir('.'):
if file.endswith("2.txt"):
Elines2 = file
for file in os.listdir('.'):
if file.endswith("3.txt"):
Elines3 = file
# ---------------------------------------------------
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
			concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
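# Illustrative sketch of the scaling above (added comment; the numbers are made
# up): each line flux is divided by the H-beta (4860 A) flux of the same model
# and put on a log10 scale, so a line half as strong as H-beta maps to ~3.39:
#
#     >>> import math
#     >>> round(math.log(4860. * (0.5 / 1.0), 10), 3)
#     3.386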
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
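# (added note) each row of max_values corresponds to one emission line and
# holds: [peak log ratio, flat index of that peak, hdens at the peak, phi at
# the peak]; these are the values annotated on the contour plots.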
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
savetxt('peaks', max_values, delimiter='\t')
| gpl-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/mpl_toolkits/axisartist/axisline_style.py | 12 | 5285 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib.patches import _Style, FancyArrowPatch
from matplotlib.transforms import IdentityTransform
from matplotlib.path import Path
import numpy as np
class _FancyAxislineStyle(object):
class SimpleArrow(FancyArrowPatch):
"""
The artist class that will be returned for SimpleArrow style.
"""
_ARROW_STYLE = "->"
def __init__(self, axis_artist, line_path, transform,
line_mutation_scale):
self._axis_artist = axis_artist
self._line_transform = transform
self._line_path = line_path
self._line_mutation_scale = line_mutation_scale
FancyArrowPatch.__init__(self,
path=self._line_path,
arrowstyle=self._ARROW_STYLE,
arrow_transmuter=None,
patchA=None,
patchB=None,
shrinkA=0.,
shrinkB=0.,
mutation_scale=line_mutation_scale,
mutation_aspect=None,
transform=IdentityTransform(),
)
def set_line_mutation_scale(self, scale):
self.set_mutation_scale(scale*self._line_mutation_scale)
def _extend_path(self, path, mutation_size=10):
"""
            Extend the path to make room for drawing the arrow.
"""
from matplotlib.bezier import get_cos_sin
x0, y0 = path.vertices[-2]
x1, y1 = path.vertices[-1]
cost, sint = get_cos_sin(x0, y0, x1, y1)
d = mutation_size * 1.
x2, y2 = x1 + cost*d, y1+sint*d
if path.codes is None:
_path = Path(np.concatenate([path.vertices, [[x2, y2]]]))
else:
_path = Path(np.concatenate([path.vertices, [[x2, y2]]]),
np.concatenate([path.codes, [Path.LINETO]]))
return _path
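        # (added note) the extension direction is taken from the last segment of
        # ``path``; its end point is pushed outward by ``mutation_size`` display
        # units so there is room for the arrow head drawn in draw() below.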
def set_path(self, path):
self._line_path = path
def draw(self, renderer):
"""
Draw the axis line.
1) transform the path to the display coordinate.
2) extend the path to make a room for arrow
3) update the path of the FancyArrowPatch.
4) draw
"""
path_in_disp = self._line_transform.transform_path(self._line_path)
mutation_size = self.get_mutation_scale() #line_mutation_scale()
            extended_path = self._extend_path(path_in_disp,
                                              mutation_size=mutation_size)
            self._path_original = extended_path
FancyArrowPatch.draw(self, renderer)
class FilledArrow(SimpleArrow):
"""
        The artist class that will be returned for FilledArrow style.
"""
_ARROW_STYLE = "-|>"
class AxislineStyle(_Style):
"""
:class:`AxislineStyle` is a container class which defines style classes
for AxisArtists.
    An instance of any axisline style class is a callable object,
whose call signature is ::
__call__(self, axis_artist, path, transform)
    When called, this should return an mpl artist with the following
methods implemented. ::
def set_path(self, path):
# set the path for axisline.
def set_line_mutation_scale(self, scale):
# set the scale
def draw(self, renderer):
# draw
"""
_style_list = {}
class _Base(object):
        # The derived classes must be able to be initialized without
        # arguments, i.e., all of their arguments (except self) must
        # have default values.
def __init__(self):
"""
initialization.
"""
super(AxislineStyle._Base, self).__init__()
def __call__(self, axis_artist, transform):
"""
            Given the AxisArtist instance and the transform for the path
(set_path method), return the mpl artist for drawing the axis line.
"""
return self.new_line(axis_artist, transform)
class SimpleArrow(_Base):
"""
A simple arrow.
"""
ArrowAxisClass = _FancyAxislineStyle.SimpleArrow
def __init__(self, size=1):
"""
*size*
size of the arrow as a fraction of the ticklabel size.
"""
self.size = size
super(AxislineStyle.SimpleArrow, self).__init__()
def new_line(self, axis_artist, transform):
linepath = Path([(0,0), (0, 1)])
axisline = self.ArrowAxisClass(axis_artist, linepath, transform,
line_mutation_scale=self.size)
return axisline
_style_list["->"] = SimpleArrow
class FilledArrow(SimpleArrow):
ArrowAxisClass = _FancyAxislineStyle.FilledArrow
_style_list["-|>"] = FilledArrow
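# A minimal usage sketch (illustrative only, not part of this module): the
# style names registered above ("->" and "-|>") are normally selected through
# an AxisArtist's set_axisline_style method, e.g.
#
#     import matplotlib.pyplot as plt
#     from mpl_toolkits.axisartist.axislines import SubplotZero
#     fig = plt.figure()
#     ax = SubplotZero(fig, 111)
#     fig.add_subplot(ax)
#     ax.axis["xzero"].set_axisline_style("-|>")   # filled arrow on the x axis
#     ax.axis["xzero"].set_visible(True)
#     plt.show()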
| mit |
rcrowder/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_macosx.py | 69 | 15397 | from __future__ import division
import os
import numpy
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase, NavigationToolbar2
from matplotlib.cbook import maxdict
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.mathtext import MathTextParser
from matplotlib.colors import colorConverter
from matplotlib.widgets import SubplotTool
import matplotlib
from matplotlib.backends import _macosx
def show():
"""Show all the figures and enter the Cocoa mainloop.
This function will not return until all windows are closed or
the interpreter exits."""
# Having a Python-level function "show" wrapping the built-in
# function "show" in the _macosx extension module allows us to
    # add attributes to "show". This is something ipython does.
_macosx.show()
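# A minimal usage sketch (illustrative only; assumes a Mac GUI session):
#
#     import matplotlib
#     matplotlib.use('MacOSX')          # select this backend before pyplot
#     import matplotlib.pyplot as plt
#     plt.plot([0, 1, 2], [0, 1, 4])
#     plt.show()                        # enters the Cocoa mainloop (see above)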
class RendererMac(RendererBase):
"""
    The renderer handles drawing/rendering operations. Most of the renderer's
    methods forward the command to the renderer's graphics context. The
renderer does not wrap a C object and is written in pure Python.
"""
texd = maxdict(50) # a cache of tex image rasters
def __init__(self, dpi, width, height):
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
self.gc = GraphicsContextMac()
self.mathtext_parser = MathTextParser('MacOSX')
def set_width_height (self, width, height):
self.width, self.height = width, height
def draw_path(self, gc, path, transform, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
gc.draw_path(path, transform, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
gc.draw_markers(marker_path, marker_trans, path, trans, rgbFace)
def draw_path_collection(self, *args):
gc = self.gc
args = args[:13]
gc.draw_path_collection(*args)
def draw_quad_mesh(self, *args):
gc = self.gc
gc.draw_quad_mesh(*args)
def new_gc(self):
self.gc.reset()
return self.gc
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
im.flipud_out()
nrows, ncols, data = im.as_rgba_str()
self.gc.draw_image(x, y, nrows, ncols, data, bbox, clippath, clippath_trans)
im.flipud_out()
def draw_tex(self, gc, x, y, s, prop, angle):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
# todo, handle props, angle, origins
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
key = s, size, self.dpi, angle, texmanager.get_font_config()
        # Cache the rendered TeX raster so repeated draws of the same string
        # do not re-run TeX (the key includes text, size, dpi, angle and fonts).
        Z = self.texd.get(key)
        if Z is None:
            Z = texmanager.get_grey(s, size, self.dpi)
            Z = numpy.array(255.0 - Z * 255.0, numpy.uint8)
            self.texd[key] = Z
        gc.draw_mathtext(x, y, angle, Z)
def _draw_mathtext(self, gc, x, y, s, prop, angle):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
size = prop.get_size_in_points()
ox, oy, width, height, descent, image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
gc.draw_mathtext(x, y, angle, 255 - image.as_array())
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
family = prop.get_family()
size = prop.get_size_in_points()
weight = prop.get_weight()
style = prop.get_style()
gc.draw_text(x, y, unicode(s), family, size, weight, style, angle)
def get_text_width_height_descent(self, s, prop, ismath):
if ismath=='TeX':
# TODO: handle props
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
Z = texmanager.get_grey(s, size, self.dpi)
m,n = Z.shape
# TODO: handle descent; This is based on backend_agg.py
return n, m, 0
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
family = prop.get_family()
size = prop.get_size_in_points()
weight = prop.get_weight()
style = prop.get_style()
return self.gc.get_text_width_height_descent(unicode(s), family, size, weight, style)
def flipy(self):
return False
def points_to_pixels(self, points):
return points/72.0 * self.dpi
def option_image_nocomposite(self):
return True
class GraphicsContextMac(_macosx.GraphicsContext, GraphicsContextBase):
"""
The GraphicsContext wraps a Quartz graphics context. All methods
are implemented at the C-level in macosx.GraphicsContext. These
methods set drawing properties such as the line style, fill color,
etc. The actual drawing is done by the Renderer, which draws into
the GraphicsContext.
"""
def __init__(self):
GraphicsContextBase.__init__(self)
_macosx.GraphicsContext.__init__(self)
def set_foreground(self, fg, isRGB=False):
if not isRGB:
fg = colorConverter.to_rgb(fg)
_macosx.GraphicsContext.set_foreground(self, fg)
def set_clip_rectangle(self, box):
GraphicsContextBase.set_clip_rectangle(self, box)
if not box: return
_macosx.GraphicsContext.set_clip_rectangle(self, box.bounds)
def set_clip_path(self, path):
GraphicsContextBase.set_clip_path(self, path)
if not path: return
path = path.get_fully_transformed_path()
_macosx.GraphicsContext.set_clip_path(self, path)
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For performance reasons, we don't want to redraw the figure after
each draw command. Instead, we mark the figure as invalid, so that
it will be redrawn as soon as the event loop resumes via PyOS_InputHook.
This function should be called after each draw event, even if
matplotlib is not running interactively.
"""
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.invalidate()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
canvas = FigureCanvasMac(figure)
manager = FigureManagerMac(canvas, num)
return manager
class FigureCanvasMac(_macosx.FigureCanvas, FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
Events such as button presses, mouse movements, and key presses
are handled in the C code and the base class methods
button_press_event, button_release_event, motion_notify_event,
key_press_event, and key_release_event are called from there.
"""
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
width, height = self.get_width_height()
self.renderer = RendererMac(figure.dpi, width, height)
_macosx.FigureCanvas.__init__(self, width, height)
def resize(self, width, height):
self.renderer.set_width_height(width, height)
dpi = self.figure.dpi
width /= dpi
height /= dpi
self.figure.set_size_inches(width, height)
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', **kwargs):
if dpi is None: dpi = matplotlib.rcParams['savefig.dpi']
filename = unicode(filename)
root, ext = os.path.splitext(filename)
ext = ext[1:].lower()
if not ext:
ext = "png"
filename = root + "." + ext
if ext=="jpg": ext = "jpeg"
# save the figure settings
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
# set the new parameters
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
if ext in ('jpeg', 'png', 'tiff', 'gif', 'bmp'):
width, height = self.figure.get_size_inches()
width, height = width*dpi, height*dpi
self.write_bitmap(filename, width, height)
elif ext == 'pdf':
self.write_pdf(filename)
elif ext in ('ps', 'eps'):
from backend_ps import FigureCanvasPS
# Postscript backend changes figure.dpi, but doesn't change it back
origDPI = self.figure.dpi
fc = self.switch_backends(FigureCanvasPS)
fc.print_figure(filename, dpi, facecolor, edgecolor,
orientation, **kwargs)
self.figure.dpi = origDPI
self.figure.set_canvas(self)
elif ext=='svg':
from backend_svg import FigureCanvasSVG
fc = self.switch_backends(FigureCanvasSVG)
fc.print_figure(filename, dpi, facecolor, edgecolor,
orientation, **kwargs)
self.figure.set_canvas(self)
else:
raise ValueError("Figure format not available (extension %s)" % ext)
# restore original figure settings
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
class FigureManagerMac(_macosx.FigureManager, FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
title = "Figure %d" % num
_macosx.FigureManager.__init__(self, canvas, title)
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbarMac(canvas)
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2Mac(canvas)
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
            if self.toolbar is not None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
# This is ugly, but this is what tkagg and gtk are doing.
# It is needed to get ginput() working.
self.canvas.figure.show = lambda *args: self.show()
def show(self):
self.canvas.draw()
def close(self):
Gcf.destroy(self.num)
class NavigationToolbarMac(_macosx.NavigationToolbar):
def __init__(self, canvas):
self.canvas = canvas
basedir = os.path.join(matplotlib.rcParams['datapath'], "images")
images = {}
for imagename in ("stock_left",
"stock_right",
"stock_up",
"stock_down",
"stock_zoom-in",
"stock_zoom-out",
"stock_save_as"):
filename = os.path.join(basedir, imagename+".ppm")
images[imagename] = self._read_ppm_image(filename)
_macosx.NavigationToolbar.__init__(self, images)
self.message = None
def _read_ppm_image(self, filename):
data = ""
imagefile = open(filename)
for line in imagefile:
if "#" in line:
i = line.index("#")
line = line[:i] + "\n"
data += line
imagefile.close()
magic, width, height, maxcolor, imagedata = data.split(None, 4)
width, height = int(width), int(height)
assert magic=="P6"
assert len(imagedata)==width*height*3 # 3 colors in RGB
return (width, height, imagedata)
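    # Sketch of the binary PPM ("P6") layout the parser above expects, e.g. a
    # 2x1 image with one red and one blue pixel ('#' comments are stripped
    # before splitting):
    #
    #     P6
    #     2 1
    #     255
    #     \xff\x00\x00\x00\x00\xff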
def panx(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].xaxis.pan(direction)
self.canvas.invalidate()
def pany(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].yaxis.pan(direction)
self.canvas.invalidate()
def zoomx(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].xaxis.zoom(direction)
self.canvas.invalidate()
def zoomy(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].yaxis.zoom(direction)
self.canvas.invalidate()
def save_figure(self):
filename = _macosx.choose_save_file('Save the figure')
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
class NavigationToolbar2Mac(_macosx.NavigationToolbar2, NavigationToolbar2):
def __init__(self, canvas):
NavigationToolbar2.__init__(self, canvas)
def _init_toolbar(self):
basedir = os.path.join(matplotlib.rcParams['datapath'], "images")
_macosx.NavigationToolbar2.__init__(self, basedir)
def draw_rubberband(self, event, x0, y0, x1, y1):
self.canvas.set_rubberband(x0, y0, x1, y1)
def release(self, event):
self.canvas.remove_rubberband()
def set_cursor(self, cursor):
_macosx.set_cursor(cursor)
def save_figure(self):
filename = _macosx.choose_save_file('Save the figure')
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
def prepare_configure_subplots(self):
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasMac(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
return canvas
def set_message(self, message):
_macosx.NavigationToolbar2.set_message(self, message.encode('utf-8'))
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureManager = FigureManagerMac
| agpl-3.0 |
woodscn/scipy | scipy/stats/morestats.py | 4 | 96359 | # Author: Travis Oliphant, 2002
#
# Further updates and enhancements by many SciPy developers.
#
from __future__ import division, print_function, absolute_import
import math
import warnings
from collections import namedtuple
import numpy as np
from numpy import (isscalar, r_, log, around, unique, asarray,
zeros, arange, sort, amin, amax, any, atleast_1d,
sqrt, ceil, floor, array, poly1d, compress,
pi, exp, ravel, count_nonzero, sin, cos, arctan2, hypot)
from numpy.testing.decorators import setastest
from scipy._lib.six import string_types
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats, _contains_nan
from .contingency import chi2_contingency
from . import distributions
from ._distn_infrastructure import rv_generic
__all__ = ['mvsdist',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
'fligner', 'mood', 'wilcoxon', 'median_test',
'pdf_fromgamma', 'circmean', 'circvar', 'circstd', 'anderson_ksamp'
]
Mean = namedtuple('Mean', ('statistic', 'minmax'))
Variance = namedtuple('Variance', ('statistic', 'minmax'))
Std_dev = namedtuple('Std_dev', ('statistic', 'minmax'))
def bayes_mvs(data, alpha=0.90):
r"""
Bayesian confidence intervals for the mean, var, and std.
Parameters
----------
data : array_like
        Input data; if multi-dimensional, it is flattened to 1-D by `bayes_mvs`.
Requires 2 or more data points.
alpha : float, optional
Probability that the returned confidence interval contains
the true parameter.
Returns
-------
mean_cntr, var_cntr, std_cntr : tuple
The three results are for the mean, variance and standard deviation,
respectively. Each result is a tuple of the form::
(center, (lower, upper))
with `center` the mean of the conditional pdf of the value given the
data, and `(lower, upper)` a confidence interval, centered on the
median, containing the estimate to a probability ``alpha``.
See Also
--------
mvsdist
Notes
-----
    Each tuple of mean, variance, and standard deviation estimates represents
    the (center, (lower, upper)) with center the mean of the conditional pdf
    of the value given the data, and (lower, upper) a confidence interval
centered on the median, containing the estimate to a probability
``alpha``.
Converts data to 1-D and assumes all data has the same mean and variance.
Uses Jeffrey's prior for variance and std.
    Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(data))``
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278,
2006.
Examples
--------
First a basic example to demonstrate the outputs:
>>> from scipy import stats
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = stats.bayes_mvs(data)
>>> mean
Mean(statistic=9.0, minmax=(7.1036502226125329, 10.896349777387467))
>>> var
Variance(statistic=10.0, minmax=(3.176724206..., 24.45910382...))
>>> std
Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.9456146050146295))
Now we generate some normally distributed random data, and get estimates of
mean and standard deviation with 95% confidence intervals for those
estimates:
>>> n_samples = 100000
>>> data = stats.norm.rvs(size=n_samples)
>>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.hist(data, bins=100, normed=True, label='Histogram of data')
>>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean')
>>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r',
... alpha=0.2, label=r'Estimated mean (95% limits)')
>>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale')
>>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2,
... label=r'Estimated scale (95% limits)')
>>> ax.legend(fontsize=10)
>>> ax.set_xlim([-4, 4])
>>> ax.set_ylim([0, 0.5])
>>> plt.show()
"""
m, v, s = mvsdist(data)
if alpha >= 1 or alpha <= 0:
raise ValueError("0 < alpha < 1 is required, but alpha=%s was given."
% alpha)
m_res = Mean(m.mean(), m.interval(alpha))
v_res = Variance(v.mean(), v.interval(alpha))
s_res = Std_dev(s.mean(), s.interval(alpha))
return m_res, v_res, s_res
def mvsdist(data):
"""
'Frozen' distributions for mean, variance, and standard deviation of data.
Parameters
----------
data : array_like
Input array. Converted to 1-D using ravel.
Requires 2 or more data-points.
Returns
-------
mdist : "frozen" distribution object
Distribution object representing the mean of the data
vdist : "frozen" distribution object
Distribution object representing the variance of the data
sdist : "frozen" distribution object
Distribution object representing the standard deviation of the data
See Also
--------
bayes_mvs
Notes
-----
The return values from ``bayes_mvs(data)`` is equivalent to
``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.
In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)``
on the three distribution objects returned from this function will give
the same results that are returned from `bayes_mvs`.
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278,
2006.
Examples
--------
>>> from scipy import stats
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = stats.mvsdist(data)
We now have frozen distribution objects "mean", "var" and "std" that we can
examine:
>>> mean.mean()
9.0
>>> mean.interval(0.95)
(6.6120585482655692, 11.387941451734431)
>>> mean.std()
1.1952286093343936
"""
x = ravel(data)
n = len(x)
if n < 2:
raise ValueError("Need at least 2 data-points.")
xbar = x.mean()
C = x.var()
if n > 1000: # gaussian approximations for large n
mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n))
sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n)))
vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C)
else:
nm1 = n - 1
fac = n * C / 2.
val = nm1 / 2.
mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1))
sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac))
vdist = distributions.invgamma(val, scale=fac)
return mdist, vdist, sdist
def kstat(data, n=2):
r"""
Return the nth k-statistic (1<=n<=4 so far).
The nth k-statistic k_n is the unique symmetric unbiased estimator of the
nth cumulant kappa_n.
Parameters
----------
data : array_like
Input array. Note that n-D input gets flattened.
n : int, {1, 2, 3, 4}, optional
Default is equal to 2.
Returns
-------
kstat : float
The nth k-statistic.
See Also
--------
kstatvar: Returns an unbiased estimator of the variance of the k-statistic.
moment: Returns the n-th central moment about the mean for a sample.
Notes
-----
For a sample size n, the first few k-statistics are given by:
.. math::
k_{1} = \mu
k_{2} = \frac{n}{n-1} m_{2}
k_{3} = \frac{ n^{2} } {(n-1) (n-2)} m_{3}
k_{4} = \frac{ n^{2} [(n + 1)m_{4} - 3(n - 1) m^2_{2}]} {(n-1) (n-2) (n-3)}
where :math:`\mu` is the sample mean, :math:`m_2` is the sample
variance, and :math:`m_i` is the i-th sample central moment.
References
----------
http://mathworld.wolfram.com/k-Statistic.html
http://mathworld.wolfram.com/Cumulant.html
Examples
--------
>>> from scipy import stats
>>> rndm = np.random.RandomState(1234)
As sample size increases, n-th moment and n-th k-statistic converge to the
same number (although they aren't identical). In the case of the normal
distribution, they converge to zero.
>>> for n in [2, 3, 4, 5, 6, 7]:
... x = rndm.normal(size=10**n)
... m, k = stats.moment(x, 3), stats.kstat(x, 3)
... print("%.3g %.3g %.3g" % (m, k, m-k))
-0.631 -0.651 0.0194
0.0282 0.0283 -8.49e-05
-0.0454 -0.0454 1.36e-05
7.53e-05 7.53e-05 -2.26e-09
0.00166 0.00166 -4.99e-09
-2.88e-06 -2.88e-06 8.63e-13
"""
if n > 4 or n < 1:
raise ValueError("k-statistics only supported for 1<=n<=4")
n = int(n)
S = np.zeros(n + 1, np.float64)
data = ravel(data)
N = data.size
# raise ValueError on empty input
if N == 0:
raise ValueError("Data input must not be empty")
# on nan input, return nan without warning
if np.isnan(np.sum(data)):
return np.nan
for k in range(1, n + 1):
S[k] = np.sum(data**k, axis=0)
if n == 1:
return S[1] * 1.0/N
elif n == 2:
return (N*S[2] - S[1]**2.0) / (N*(N - 1.0))
elif n == 3:
return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0))
elif n == 4:
return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) /
(N*(N-1.0)*(N-2.0)*(N-3.0)))
else:
raise ValueError("Should not be here.")
def kstatvar(data, n=2):
r"""
Returns an unbiased estimator of the variance of the k-statistic.
See `kstat` for more details of the k-statistic.
Parameters
----------
data : array_like
Input array. Note that n-D input gets flattened.
n : int, {1, 2}, optional
Default is equal to 2.
Returns
-------
kstatvar : float
The nth k-statistic variance.
See Also
--------
kstat: Returns the n-th k-statistic.
moment: Returns the n-th central moment about the mean for a sample.
Notes
-----
The variances of the first few k-statistics are given by:
.. math::
var(k_{1}) = \frac{\kappa^2}{n}
var(k_{2}) = \frac{\kappa^4}{n} + \frac{2\kappa^2_{2}}{n - 1}
var(k_{3}) = \frac{\kappa^6}{n} + \frac{9 \kappa_2 \kappa_4}{n - 1} +
\frac{9 \kappa^2_{3}}{n - 1} +
\frac{6 n \kappa^3_{2}}{(n-1) (n-2)}
var(k_{4}) = \frac{\kappa^8}{n} + \frac{16 \kappa_2 \kappa_6}{n - 1} +
\frac{48 \kappa_{3} \kappa_5}{n - 1} +
\frac{34 \kappa^2_{4}}{n-1} + \frac{72 n \kappa^2_{2} \kappa_4}{(n - 1) (n - 2)} +
\frac{144 n \kappa_{2} \kappa^2_{3}}{(n - 1) (n - 2)} +
\frac{24 (n + 1) n \kappa^4_{2}}{(n - 1) (n - 2) (n - 3)}
"""
data = ravel(data)
N = len(data)
if n == 1:
return kstat(data, n=2) * 1.0/N
elif n == 2:
k2 = kstat(data, n=2)
k4 = kstat(data, n=4)
return (2*N*k2**2 + (N-1)*k4) / (N*(N+1))
else:
raise ValueError("Only n=1 or n=2 supported.")
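# Illustrative check of the n=1 branch above (added comment, not part of the
# original module): kstatvar(x, 1) is kstat(x, 2) divided by the sample size.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x = np.arange(20.0)
#     >>> np.allclose(stats.kstatvar(x, 1), stats.kstat(x, 2) / len(x))
#     True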
def _calc_uniform_order_statistic_medians(n):
"""
Approximations of uniform order statistic medians.
Parameters
----------
n : int
Sample size.
Returns
-------
v : 1d float array
Approximations of the order statistic medians.
References
----------
.. [1] James J. Filliben, "The Probability Plot Correlation Coefficient
Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
Examples
--------
Order statistics of the uniform distribution on the unit interval
are marginally distributed according to beta distributions.
    The expectations of these order statistics are evenly spaced across
the interval, but the distributions are skewed in a way that
pushes the medians slightly towards the endpoints of the unit interval:
>>> n = 4
>>> k = np.arange(1, n+1)
>>> from scipy.stats import beta
>>> a = k
>>> b = n-k+1
>>> beta.mean(a, b)
array([ 0.2, 0.4, 0.6, 0.8])
>>> beta.median(a, b)
array([ 0.15910358, 0.38572757, 0.61427243, 0.84089642])
The Filliben approximation uses the exact medians of the smallest
and greatest order statistics, and the remaining medians are approximated
by points spread evenly across a sub-interval of the unit interval:
    >>> from scipy.stats.morestats import _calc_uniform_order_statistic_medians
>>> _calc_uniform_order_statistic_medians(n)
array([ 0.15910358, 0.38545246, 0.61454754, 0.84089642])
This plot shows the skewed distributions of the order statistics
of a sample of size four from a uniform distribution on the unit interval:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0.0, 1.0, num=50, endpoint=True)
>>> pdfs = [beta.pdf(x, a[i], b[i]) for i in range(n)]
>>> plt.figure()
>>> plt.plot(x, pdfs[0], x, pdfs[1], x, pdfs[2], x, pdfs[3])
"""
v = np.zeros(n, dtype=np.float64)
v[-1] = 0.5**(1.0 / n)
v[0] = 1 - v[-1]
i = np.arange(2, n)
v[1:-1] = (i - 0.3175) / (n + 0.365)
return v
def _parse_dist_kw(dist, enforce_subclass=True):
"""Parse `dist` keyword.
Parameters
----------
dist : str or stats.distributions instance.
Several functions take `dist` as a keyword, hence this utility
function.
enforce_subclass : bool, optional
If True (default), `dist` needs to be a
`_distn_infrastructure.rv_generic` instance.
It can sometimes be useful to set this keyword to False, if a function
wants to accept objects that just look somewhat like such an instance
(for example, they have a ``ppf`` method).
"""
if isinstance(dist, rv_generic):
pass
elif isinstance(dist, string_types):
try:
dist = getattr(distributions, dist)
except AttributeError:
raise ValueError("%s is not a valid distribution name" % dist)
elif enforce_subclass:
msg = ("`dist` should be a stats.distributions instance or a string "
"with the name of such a distribution.")
raise ValueError(msg)
return dist
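# Illustrative sketch of the helper above (added comment): a string resolves to
# the distribution of the same name, while an rv_generic instance is returned
# unchanged.
#
#     >>> from scipy import stats
#     >>> from scipy.stats.morestats import _parse_dist_kw
#     >>> _parse_dist_kw('norm') is stats.norm
#     True
#     >>> _parse_dist_kw(stats.expon) is stats.expon
#     True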
def _add_axis_labels_title(plot, xlabel, ylabel, title):
"""Helper function to add axes labels and a title to stats plots"""
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title(title)
plot.set_xlabel(xlabel)
plot.set_ylabel(ylabel)
else:
# matplotlib.pyplot module
plot.title(title)
plot.xlabel(xlabel)
plot.ylabel(ylabel)
except:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
def probplot(x, sparams=(), dist='norm', fit=True, plot=None, rvalue=False):
"""
Calculate quantiles for a probability plot, and optionally show the plot.
Generates a probability plot of sample data against the quantiles of a
specified theoretical distribution (the normal distribution by default).
`probplot` optionally calculates a best-fit line for the data and plots the
results using Matplotlib or a given plot function.
Parameters
----------
x : array_like
Sample/response data from which `probplot` creates the plot.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters plus location
and scale).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
stats.distributions instance (i.e. they have a ``ppf`` method) are also
accepted.
fit : bool, optional
Fit a least-squares regression (best-fit) line to the sample data if
True (default).
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
Returns
-------
(osm, osr) : tuple of ndarrays
Tuple of theoretical quantiles (osm, or order statistic medians) and
ordered responses (osr). `osr` is simply sorted input `x`.
For details on how `osm` is calculated see the Notes section.
(slope, intercept, r) : tuple of floats, optional
Tuple containing the result of the least-squares fit, if that is
performed by `probplot`. `r` is the square root of the coefficient of
determination. If ``fit=False`` and ``plot=None``, this tuple is not
returned.
Notes
-----
Even if `plot` is given, the figure is not shown or saved by `probplot`;
``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
calling `probplot`.
`probplot` generates a probability plot, which should not be confused with
a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this
type, see ``statsmodels.api.ProbPlot``.
The formula used for the theoretical quantiles (horizontal axis of the
probability plot) is Filliben's estimate::
quantiles = dist.ppf(val), for
0.5**(1/n), for i = n
val = (i - 0.3175) / (n + 0.365), for i = 2, ..., n-1
1 - 0.5**(1/n), for i = 1
where ``i`` indicates the i-th ordered value and ``n`` is the total number
of values.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> nsample = 100
>>> np.random.seed(7654321)
A t distribution with small degrees of freedom:
>>> ax1 = plt.subplot(221)
>>> x = stats.t.rvs(3, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A t distribution with larger degrees of freedom:
>>> ax2 = plt.subplot(222)
>>> x = stats.t.rvs(25, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A mixture of two normal distributions with broadcasting:
>>> ax3 = plt.subplot(223)
>>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
... size=(nsample//2,2)).ravel()
>>> res = stats.probplot(x, plot=plt)
A standard normal distribution:
>>> ax4 = plt.subplot(224)
>>> x = stats.norm.rvs(loc=0, scale=1, size=nsample)
>>> res = stats.probplot(x, plot=plt)
Produce a new figure with a loggamma distribution, using the ``dist`` and
``sparams`` keywords:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> x = stats.loggamma.rvs(c=2.5, size=500)
>>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
>>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")
Show the results with Matplotlib:
>>> plt.show()
"""
x = np.asarray(x)
_perform_fit = fit or (plot is not None)
if x.size == 0:
if _perform_fit:
return (x, x), (np.nan, np.nan, 0.0)
else:
return x, x
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
dist = _parse_dist_kw(dist, enforce_subclass=False)
if sparams is None:
sparams = ()
if isscalar(sparams):
sparams = (sparams,)
if not isinstance(sparams, tuple):
sparams = tuple(sparams)
osm = dist.ppf(osm_uniform, *sparams)
osr = sort(x)
if _perform_fit:
# perform a linear least squares fit.
slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)
if plot is not None:
plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-')
_add_axis_labels_title(plot, xlabel='Theoretical quantiles',
ylabel='Ordered Values',
title='Probability Plot')
# Add R^2 value to the plot as text
if rvalue:
xmin = amin(osm)
xmax = amax(osm)
ymin = amin(x)
ymax = amax(x)
posx = xmin + 0.70 * (xmax - xmin)
posy = ymin + 0.01 * (ymax - ymin)
plot.text(posx, posy, "$R^2=%1.4f$" % r**2)
if fit:
return (osm, osr), (slope, intercept, r)
else:
return osm, osr
def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'):
"""
Calculate the shape parameter that maximizes the PPCC
The probability plot correlation coefficient (PPCC) plot can be used to
determine the optimal shape parameter for a one-parameter family of
distributions. ppcc_max returns the shape parameter that would maximize the
probability plot correlation coefficient for the given data to a
one-parameter family of distributions.
Parameters
----------
x : array_like
Input array.
brack : tuple, optional
Triple (a,b,c) where (a<b<c). If bracket consists of two numbers (a, c)
then they are assumed to be a starting interval for a downhill bracket
search (see `scipy.optimize.brent`).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. Objects that look enough
like a stats.distributions instance (i.e. they have a ``ppf`` method)
are also accepted. The default is ``'tukeylambda'``.
Returns
-------
shape_value : float
The shape parameter at which the probability plot correlation
coefficient reaches its max value.
See also
--------
ppcc_plot, probplot, boxcox
Notes
-----
The brack keyword serves as a starting point which is useful in corner
cases. One can use a plot to obtain a rough visual estimate of the location
for the maximum to start the search near it.
References
----------
.. [1] J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
.. [2] http://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm
Examples
--------
First we generate some random data from a Tukey-Lambda distribution,
with shape parameter -0.7:
>>> from scipy import stats
>>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
... random_state=1234567) + 1e4
Now we explore this data with a PPCC plot as well as the related
probability plot and Box-Cox normplot. A red line is drawn where we
expect the PPCC value to be maximal (at the shape parameter -0.7 used
above):
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(figsize=(8, 6))
>>> ax = fig.add_subplot(111)
>>> res = stats.ppcc_plot(x, -5, 5, plot=ax)
We calculate the value where the shape should reach its maximum and a red
line is drawn there. The line should coincide with the highest point in the
ppcc_plot.
>>> max = stats.ppcc_max(x)
>>> ax.vlines(max, 0, 1, colors='r', label='Expected shape value')
>>> plt.show()
"""
dist = _parse_dist_kw(dist)
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
osr = sort(x)
# this function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation)
# and returns 1-r so that a minimization function maximizes the
# correlation
def tempfunc(shape, mi, yvals, func):
xvals = func(mi, shape)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf))
def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80):
"""
Calculate and optionally plot probability plot correlation coefficient.
The probability plot correlation coefficient (PPCC) plot can be used to
determine the optimal shape parameter for a one-parameter family of
distributions. It cannot be used for distributions without shape parameters
(like the normal distribution) or with multiple shape parameters.
By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A
Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed
distributions via an approximately normal one, and is therefore particularly
useful in practice.
Parameters
----------
x : array_like
Input array.
a, b: scalar
Lower and upper bounds of the shape parameter to use.
dist : str or stats.distributions instance, optional
Distribution or distribution function name. Objects that look enough
like a stats.distributions instance (i.e. they have a ``ppf`` method)
are also accepted. The default is ``'tukeylambda'``.
plot : object, optional
If given, plots PPCC against the shape parameter.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`a` to `b`).
Returns
-------
svals : ndarray
The shape values for which `ppcc` was calculated.
ppcc : ndarray
The calculated probability plot correlation coefficient values.
See also
--------
ppcc_max, probplot, boxcox_normplot, tukeylambda
References
----------
J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
Examples
--------
First we generate some random data from a Tukey-Lambda distribution,
with shape parameter -0.7:
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234567)
>>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
Now we explore this data with a PPCC plot as well as the related
probability plot and Box-Cox normplot. A red line is drawn where we
expect the PPCC value to be maximal (at the shape parameter -0.7 used
above):
>>> fig = plt.figure(figsize=(12, 4))
>>> ax1 = fig.add_subplot(131)
>>> ax2 = fig.add_subplot(132)
>>> ax3 = fig.add_subplot(133)
>>> res = stats.probplot(x, plot=ax1)
>>> res = stats.boxcox_normplot(x, -5, 5, plot=ax2)
>>> res = stats.ppcc_plot(x, -5, 5, plot=ax3)
>>> ax3.vlines(-0.7, 0, 1, colors='r', label='Expected shape value')
>>> plt.show()
"""
if b <= a:
raise ValueError("`b` has to be larger than `a`.")
svals = np.linspace(a, b, num=N)
ppcc = np.empty_like(svals)
for k, sval in enumerate(svals):
_, r2 = probplot(x, sval, dist=dist, fit=True)
ppcc[k] = r2[-1]
if plot is not None:
plot.plot(svals, ppcc, 'x')
_add_axis_labels_title(plot, xlabel='Shape Values',
ylabel='Prob Plot Corr. Coef.',
title='(%s) PPCC Plot' % dist)
return svals, ppcc
def boxcox_llf(lmb, data):
r"""The boxcox log-likelihood function.
Parameters
----------
lmb : scalar
Parameter for Box-Cox transformation. See `boxcox` for details.
data : array_like
Data to calculate Box-Cox log-likelihood for. If `data` is
multi-dimensional, the log-likelihood is calculated along the first
axis.
Returns
-------
llf : float or ndarray
Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`,
an array otherwise.
See Also
--------
boxcox, probplot, boxcox_normplot, boxcox_normmax
Notes
-----
The Box-Cox log-likelihood function is defined here as
.. math::
llf = (\lambda - 1) \sum_i(\log(x_i)) -
N/2 \log(\sum_i (y_i - \bar{y})^2 / N),
where ``y`` is the Box-Cox transformed input data ``x``.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes
>>> np.random.seed(1245)
Generate some random variates and calculate Box-Cox log-likelihood values
for them for a range of ``lmbda`` values:
>>> x = stats.loggamma.rvs(5, loc=10, size=1000)
>>> lmbdas = np.linspace(-2, 10)
>>> llf = np.zeros(lmbdas.shape, dtype=float)
>>> for ii, lmbda in enumerate(lmbdas):
... llf[ii] = stats.boxcox_llf(lmbda, x)
Also find the optimal lmbda value with `boxcox`:
>>> x_most_normal, lmbda_optimal = stats.boxcox(x)
Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a
horizontal line to check that that's really the optimum:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(lmbdas, llf, 'b.-')
>>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r')
>>> ax.set_xlabel('lmbda parameter')
>>> ax.set_ylabel('Box-Cox log-likelihood')
Now add some probability plots to show that where the log-likelihood is
maximized the data transformed with `boxcox` looks closest to normal:
>>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'
>>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):
... xt = stats.boxcox(x, lmbda=lmbda)
... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)
... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc)
... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')
... ax_inset.set_xticklabels([])
... ax_inset.set_yticklabels([])
... ax_inset.set_title('$\lambda=%1.2f$' % lmbda)
>>> plt.show()
"""
data = np.asarray(data)
N = data.shape[0]
if N == 0:
return np.nan
y = boxcox(data, lmb)
y_mean = np.mean(y, axis=0)
llf = (lmb - 1) * np.sum(np.log(data), axis=0)
llf -= N / 2.0 * np.log(np.sum((y - y_mean)**2. / N, axis=0))
return llf
def _boxcox_conf_interval(x, lmax, alpha):
# Need to find the lambda for which
# f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
target = boxcox_llf(lmax, x) - fac
def rootfunc(lmbda, data, target):
return boxcox_llf(lmbda, data) - target
# Find positive endpoint of interval in which answer is to be found
newlm = lmax + 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm += 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))
# Now find negative interval in the same way
newlm = lmax - 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm -= 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None):
r"""
Return a positive dataset transformed by a Box-Cox power transformation.
Parameters
----------
x : ndarray
Input array. Should be 1-dimensional.
lmbda : {None, scalar}, optional
If `lmbda` is not None, do the transformation for that value.
If `lmbda` is None, find the lambda that maximizes the log-likelihood
function and return it as the second output argument.
alpha : {None, float}, optional
If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence
interval for `lmbda` as the third output argument.
Must be between 0.0 and 1.0.
Returns
-------
boxcox : ndarray
Box-Cox power transformed array.
maxlog : float, optional
If the `lmbda` parameter is None, the second returned argument is
the lambda that maximizes the log-likelihood function.
(min_ci, max_ci) : tuple of float, optional
If `lmbda` parameter is None and ``alpha`` is not None, this returned
tuple of floats represents the minimum and maximum confidence limits
given ``alpha``.
See Also
--------
probplot, boxcox_normplot, boxcox_normmax, boxcox_llf
Notes
-----
The Box-Cox transform is given by::
y = (x**lmbda - 1) / lmbda, for lmbda > 0
log(x), for lmbda = 0
`boxcox` requires the input data to be positive. Sometimes a Box-Cox
transformation provides a shift parameter to achieve this; `boxcox` does
not. Such a shift parameter is equivalent to adding a positive constant to
`x` before calling `boxcox`.
The confidence limits returned when ``alpha`` is provided give the interval
where:
.. math::
llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1),
with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared
function.
References
----------
G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
Royal Statistical Society B, 26, 211-252 (1964).
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
We generate some random variates from a non-normal distribution and make a
probability plot for it, to show it is non-normal in the tails:
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> prob = stats.probplot(x, dist=stats.norm, plot=ax1)
>>> ax1.set_xlabel('')
>>> ax1.set_title('Probplot against normal distribution')
We now use `boxcox` to transform the data so it's closest to normal:
>>> ax2 = fig.add_subplot(212)
>>> xt, _ = stats.boxcox(x)
>>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2)
>>> ax2.set_title('Probplot after Box-Cox transformation')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if any(x <= 0):
raise ValueError("Data must be positive.")
if lmbda is not None: # single transformation
return special.boxcox(x, lmbda)
# If lmbda=None, find the lmbda that maximizes the log-likelihood function.
lmax = boxcox_normmax(x, method='mle')
y = boxcox(x, lmax)
if alpha is None:
return y, lmax
else:
# Find confidence interval
interval = _boxcox_conf_interval(x, lmax, alpha)
return y, lmax, interval
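# Illustrative check of the transform defined in the docstring above (added
# comment): with lmbda=0 the Box-Cox transform reduces to the natural log.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> np.allclose(stats.boxcox([1.0, np.e], lmbda=0), [0.0, 1.0])
#     True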
def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):
"""Compute optimal Box-Cox transform parameter for input data.
Parameters
----------
x : array_like
Input array.
brack : 2-tuple, optional
The starting interval for a downhill bracket search with
`optimize.brent`. Note that this is in most cases not critical; the
final result is allowed to be outside this bracket.
method : str, optional
The method to determine the optimal transform parameter (`boxcox`
``lmbda`` parameter). Options are:
'pearsonr' (default)
Maximizes the Pearson correlation coefficient between
``y = boxcox(x)`` and the expected values for ``y`` if `x` would be
normally-distributed.
'mle'
Minimizes the log-likelihood `boxcox_llf`. This is the method used
in `boxcox`.
'all'
Use all optimization methods available, and return all results.
Useful to compare different methods.
Returns
-------
maxlog : float or ndarray
The optimal transform parameter found. An array instead of a scalar
for ``method='all'``.
See Also
--------
boxcox, boxcox_llf, boxcox_normplot
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234) # make this example reproducible
Generate some data and determine optimal ``lmbda`` in various ways:
>>> x = stats.loggamma.rvs(5, size=30) + 5
>>> y, lmax_mle = stats.boxcox(x)
>>> lmax_pearsonr = stats.boxcox_normmax(x)
>>> lmax_mle
7.177...
>>> lmax_pearsonr
7.916...
>>> stats.boxcox_normmax(x, method='all')
array([ 7.91667384, 7.17718692])
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.boxcox_normplot(x, -10, 10, plot=ax)
>>> ax.axvline(lmax_mle, color='r')
>>> ax.axvline(lmax_pearsonr, color='g', ls='--')
>>> plt.show()
"""
def _pearsonr(x, brack):
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
xvals = distributions.norm.ppf(osm_uniform)
def _eval_pearsonr(lmbda, xvals, samps):
# This function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation) and
# returns ``1 - r`` so that a minimization function maximizes the
# correlation.
y = boxcox(samps, lmbda)
yvals = np.sort(y)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x))
def _mle(x, brack):
def _eval_mle(lmb, data):
# function to minimize
return -boxcox_llf(lmb, data)
return optimize.brent(_eval_mle, brack=brack, args=(x,))
def _all(x, brack):
maxlog = np.zeros(2, dtype=float)
maxlog[0] = _pearsonr(x, brack)
maxlog[1] = _mle(x, brack)
return maxlog
methods = {'pearsonr': _pearsonr,
'mle': _mle,
'all': _all}
if method not in methods.keys():
raise ValueError("Method %s not recognized." % method)
optimfunc = methods[method]
return optimfunc(x, brack)
def boxcox_normplot(x, la, lb, plot=None, N=80):
"""Compute parameters for a Box-Cox normality plot, optionally show it.
A Box-Cox normality plot shows graphically what the best transformation
parameter is to use in `boxcox` to obtain a distribution that is close
to normal.
Parameters
----------
x : array_like
Input array.
la, lb : scalar
The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`
for Box-Cox transformations. These are also the limits of the
horizontal axis of the plot if that is generated.
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`la` to `lb`).
Returns
-------
lmbdas : ndarray
The ``lmbda`` values for which a Box-Cox transform was done.
ppcc : ndarray
        Probability Plot Correlation Coefficient, as obtained from `probplot`
when fitting the Box-Cox transformed input `x` against a normal
distribution.
See Also
--------
probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max
Notes
-----
Even if `plot` is given, the figure is not shown or saved by
`boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
should be used after calling `probplot`.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Generate some non-normally distributed data, and create a Box-Cox plot:
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.boxcox_normplot(x, -20, 20, plot=ax)
Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
the same plot:
>>> _, maxlog = stats.boxcox(x)
>>> ax.axvline(maxlog, color='r')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if lb <= la:
raise ValueError("`lb` has to be larger than `la`.")
lmbdas = np.linspace(la, lb, num=N)
ppcc = lmbdas * 0.0
for i, val in enumerate(lmbdas):
# Determine for each lmbda the correlation coefficient of transformed x
z = boxcox(x, lmbda=val)
_, r2 = probplot(z, dist='norm', fit=True)
ppcc[i] = r2[-1]
if plot is not None:
plot.plot(lmbdas, ppcc, 'x')
_add_axis_labels_title(plot, xlabel='$\lambda$',
ylabel='Prob Plot Corr. Coef.',
title='Box-Cox Normality Plot')
return lmbdas, ppcc
def shapiro(x, a=None, reta=False):
"""
Perform the Shapiro-Wilk test for normality.
The Shapiro-Wilk test tests the null hypothesis that the
data was drawn from a normal distribution.
Parameters
----------
x : array_like
Array of sample data.
a : array_like, optional
Array of internal parameters used in the calculation. If these
are not given, they will be computed internally. If x has length
n, then a must have length n/2.
reta : bool, optional
Whether or not to return the internally computed a values. The
default is False.
Returns
-------
W : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
a : array_like, optional
If `reta` is True, then these are the internally computed "a"
values that may be passed into this function on future calls.
See Also
--------
anderson : The Anderson-Darling test for normality
kstest : The Kolmogorov-Smirnov test for goodness of fit.
Notes
-----
The algorithm used is described in [4]_ but censoring parameters as
described are not implemented. For N > 5000 the W test statistic is accurate
but the p-value may not be.
The chance of rejecting the null hypothesis when it is true is close to 5%
regardless of sample size.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Shapiro, S. S. & Wilk, M.B (1965). An analysis of variance test for
normality (complete samples), Biometrika, Vol. 52, pp. 591-611.
.. [3] Razali, N. M. & Wah, Y. B. (2011) Power comparisons of Shapiro-Wilk,
Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests, Journal of
Statistical Modeling and Analytics, Vol. 2, pp. 21-33.
.. [4] ALGORITHM AS R94 APPL. STATIST. (1995) VOL. 44, NO. 4.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
>>> x = stats.norm.rvs(loc=5, scale=3, size=100)
>>> stats.shapiro(x)
(0.9772805571556091, 0.08144091814756393)
"""
if a is not None or reta:
warnings.warn("input parameters 'a' and 'reta' are scheduled to be "
"removed in version 0.18.0", FutureWarning)
x = np.ravel(x)
N = len(x)
if N < 3:
raise ValueError("Data must be at least length 3.")
if a is None:
a = zeros(N, 'f')
init = 0
else:
if len(a) != N // 2:
raise ValueError("len(a) must equal len(x)/2")
init = 1
y = sort(x)
a, w, pw, ifault = statlib.swilk(y, a[:N//2], init)
if ifault not in [0, 2]:
warnings.warn("Input data for shapiro has range zero. The results "
"may not be accurate.")
if N > 5000:
warnings.warn("p-value may not be accurate for N > 5000.")
if reta:
return w, pw, a
else:
return w, pw
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
# Some Comparisons", Journal of the American Statistical
# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
# on the Empirical Distribution Function.", Biometrika,
# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
AndersonResult = namedtuple('AndersonResult', ('statistic',
'critical_values',
'significance_level'))
def anderson(x, dist='norm'):
"""
Anderson-Darling test for data coming from a particular distribution
The Anderson-Darling test is a modification of the Kolmogorov-
Smirnov test `kstest` for the null hypothesis that a sample is
drawn from a population that follows a particular distribution.
For the Anderson-Darling test, the critical values depend on
which distribution is being tested against. This function works
for normal, exponential, logistic, or Gumbel (Extreme Value
Type I) distributions.
Parameters
----------
x : array_like
array of sample data
    dist : {'norm', 'expon', 'logistic', 'gumbel', 'gumbel_l', 'gumbel_r',
        'extreme1'}, optional
        The type of distribution to test against. The default is 'norm';
        'extreme1', 'gumbel_l' and 'gumbel' are synonyms.
Returns
-------
statistic : float
The Anderson-Darling test statistic
critical_values : list
The critical values for this distribution
significance_level : list
The significance levels for the corresponding critical values
in percents. The function returns critical values for a
differing set of significance levels depending on the
distribution that is being tested against.
Notes
-----
Critical values provided are for the following significance levels:
    normal/exponential
15%, 10%, 5%, 2.5%, 1%
logistic
25%, 10%, 5%, 2.5%, 1%, 0.5%
Gumbel
25%, 10%, 5%, 2.5%, 1%
If A2 is larger than these critical values then for the corresponding
significance level, the null hypothesis that the data come from the
chosen distribution can be rejected.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
Some Comparisons, Journal of the American Statistical Association,
Vol. 69, pp. 730-737.
.. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
pp. 357-369.
.. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
Distribution, Biometrika, Vol. 64, pp. 583-588.
.. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
           to Tests for Exponentiality, Technical Report No. 262,
Department of Statistics, Stanford University, Stanford, CA.
.. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
Based on the Empirical Distribution Function, Biometrika, Vol. 66,
pp. 591-595.
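    Examples
    --------
    An illustrative call on synthetic data (the statistic value depends on
    the random draw); here the statistic is compared with the critical
    value for the 5% significance level:
    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> x = stats.norm.rvs(loc=10, scale=2, size=100)
    >>> result = stats.anderson(x, dist='norm')
    >>> reject_at_5pct = result.statistic > result.critical_values[2]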
"""
if dist not in ['norm', 'expon', 'gumbel', 'gumbel_l',
'gumbel_r', 'extreme1', 'logistic']:
raise ValueError("Invalid distribution; dist must be 'norm', "
"'expon', 'gumbel', 'extreme1' or 'logistic'.")
y = sort(x)
xbar = np.mean(x, axis=0)
N = len(y)
if dist == 'norm':
s = np.std(x, ddof=1, axis=0)
w = (y - xbar) / s
logcdf = distributions.norm.logcdf(w)
logsf = distributions.norm.logsf(w)
sig = array([15, 10, 5, 2.5, 1])
critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3)
elif dist == 'expon':
w = y / xbar
logcdf = distributions.expon.logcdf(w)
logsf = distributions.expon.logsf(w)
sig = array([15, 10, 5, 2.5, 1])
critical = around(_Avals_expon / (1.0 + 0.6/N), 3)
elif dist == 'logistic':
def rootfunc(ab, xj, N):
a, b = ab
tmp = (xj - a) / b
tmp2 = exp(tmp)
val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N,
np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N]
return array(val)
sol0 = array([xbar, np.std(x, ddof=1, axis=0)])
sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5)
w = (y - sol[0]) / sol[1]
logcdf = distributions.logistic.logcdf(w)
logsf = distributions.logistic.logsf(w)
sig = array([25, 10, 5, 2.5, 1, 0.5])
critical = around(_Avals_logistic / (1.0 + 0.25/N), 3)
elif dist == 'gumbel_r':
xbar, s = distributions.gumbel_r.fit(x)
w = (y - xbar) / s
logcdf = distributions.gumbel_r.logcdf(w)
logsf = distributions.gumbel_r.logsf(w)
sig = array([25, 10, 5, 2.5, 1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
else: # (dist == 'gumbel') or (dist == 'gumbel_l') or (dist == 'extreme1')
xbar, s = distributions.gumbel_l.fit(x)
w = (y - xbar) / s
logcdf = distributions.gumbel_l.logcdf(w)
logsf = distributions.gumbel_l.logsf(w)
sig = array([25, 10, 5, 2.5, 1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
i = arange(1, N + 1)
A2 = -N - np.sum((2*i - 1.0) / N * (logcdf + logsf[::-1]), axis=0)
return AndersonResult(A2, critical, sig)
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2aKN : float
The A2aKN statistics of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
Mij = s_ssorted_right.astype(float)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 6 of Scholz & Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2KN : float
The A2KN statistics of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
Anderson_ksampResult = namedtuple('Anderson_ksampResult',
('statistic', 'critical_values',
'significance_level'))
def anderson_ksamp(samples, midrank=True):
"""The Anderson-Darling test for k-samples.
The k-sample Anderson-Darling test is a modification of the
one-sample Anderson-Darling test. It tests the null hypothesis
that k-samples are drawn from the same population without having
to specify the distribution function of that population. The
critical values depend on the number of samples.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample data in arrays.
midrank : bool, optional
Type of Anderson-Darling test which is computed. Default
(True) is the midrank test applicable to continuous and
discrete populations. If False, the right side empirical
distribution is used.
Returns
-------
statistic : float
Normalized k-sample Anderson-Darling test statistic.
critical_values : array
The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%.
significance_level : float
An approximate significance level at which the null hypothesis for the
provided samples can be rejected.
Raises
------
ValueError
If less than 2 samples are provided, a sample is empty, or no
distinct observations are in the samples.
See Also
--------
ks_2samp : 2 sample Kolmogorov-Smirnov test
anderson : 1 sample Anderson-Darling test
Notes
-----
    [1]_ defines three versions of the k-sample Anderson-Darling test:
one for continuous distributions and two for discrete
distributions, in which ties between samples may occur. The
default of this routine is to compute the version based on the
midrank empirical distribution function. This test is applicable
to continuous and discrete data. If midrank is set to False, the
right side empirical distribution is used for a test for discrete
data. According to [1]_, the two discrete test statistics differ
only slightly if a few collisions due to round-off errors occur in
the test not adjusted for ties between samples.
.. versionadded:: 0.14.0
References
----------
.. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
Anderson-Darling Tests, Journal of the American Statistical
Association, Vol. 82, pp. 918-924.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(314159)
The null hypothesis that the two random samples come from the same
distribution can be rejected at the 5% level because the returned
test value is greater than the critical value for 5% (1.961) but
not at the 2.5% level. The interpolation gives an approximate
significance level of 3.1%:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(loc=0.5, size=30)])
(2.4615796189876105,
array([ 0.325, 1.226, 1.961, 2.718, 3.752]),
0.03134990135800783)
The null hypothesis cannot be rejected for three samples from an
identical distribution. The approximate p-value (87%) has to be
computed by extrapolation and may not be very accurate:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(size=30), np.random.normal(size=20)])
(-0.73091722665244196,
array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856]),
0.8789283903979661)
"""
k = len(samples)
if (k < 2):
raise ValueError("anderson_ksamp needs at least two samples")
samples = list(map(np.asarray, samples))
Z = np.sort(np.hstack(samples))
N = Z.size
Zstar = np.unique(Z)
if Zstar.size < 2:
raise ValueError("anderson_ksamp needs more than one distinct "
"observation")
n = np.array([sample.size for sample in samples])
if any(n == 0):
raise ValueError("anderson_ksamp encountered sample without "
"observations")
if midrank:
A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
else:
A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
H = (1. / n).sum()
hs_cs = (1. / arange(N - 1, 1, -1)).cumsum()
h = hs_cs[-1] + 1
g = (hs_cs / arange(2, N)).sum()
a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
d = (2*h + 6)*k**2 - 4*h*k
sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
m = k - 1
A2 = (A2kN - m) / math.sqrt(sigmasq)
# The b_i values are the interpolation coefficients from Table 2
# of Scholz and Stephens 1987
b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326])
b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822])
b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396])
critical = b0 + b1 / math.sqrt(m) + b2 / m
pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2)
if A2 < critical.min() or A2 > critical.max():
warnings.warn("approximate p-value will be computed by extrapolation")
p = math.exp(np.polyval(pf, A2))
return Anderson_ksampResult(A2, critical, p)
AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue'))
def ansari(x, y):
"""
Perform the Ansari-Bradley test for equal scale parameters
The Ansari-Bradley test is a non-parametric test for the equality
of the scale parameter of the distributions from which two
samples were drawn.
Parameters
----------
x, y : array_like
arrays of sample data
Returns
-------
statistic : float
The Ansari-Bradley test statistic
pvalue : float
The p-value of the hypothesis test
See Also
--------
fligner : A non-parametric test for the equality of k variances
mood : A non-parametric test for the equality of two scale parameters
Notes
-----
The p-value given is exact when the sample sizes are both less than
55 and there are no ties, otherwise a normal approximation for the
p-value is used.
References
----------
.. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical
methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2.
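    Examples
    --------
    A sketch of typical usage on two synthetic samples with different
    scales (sizes are kept below 55 so the exact p-value is used):
    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> x = stats.norm.rvs(scale=1.0, size=40)
    >>> y = stats.norm.rvs(scale=3.0, size=40)
    >>> statistic, pvalue = stats.ansari(x, y)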
"""
x, y = asarray(x), asarray(y)
n = len(x)
m = len(y)
if m < 1:
raise ValueError("Not enough other observations.")
if n < 1:
raise ValueError("Not enough test observations.")
N = m + n
xy = r_[x, y] # combine
rank = stats.rankdata(xy)
symrank = amin(array((rank, N - rank + 1)), 0)
AB = np.sum(symrank[:n], axis=0)
uxy = unique(xy)
repeats = (len(uxy) != len(xy))
exact = ((m < 55) and (n < 55) and not repeats)
if repeats and (m < 55 or n < 55):
warnings.warn("Ties preclude use of exact statistic.")
if exact:
astart, a1, ifault = statlib.gscale(n, m)
ind = AB - astart
total = np.sum(a1, axis=0)
if ind < len(a1)/2.0:
cind = int(ceil(ind))
if ind == cind:
pval = 2.0 * np.sum(a1[:cind+1], axis=0) / total
else:
pval = 2.0 * np.sum(a1[:cind], axis=0) / total
else:
find = int(floor(ind))
if ind == floor(ind):
pval = 2.0 * np.sum(a1[find:], axis=0) / total
else:
pval = 2.0 * np.sum(a1[find+1:], axis=0) / total
return AnsariResult(AB, min(1.0, pval))
# otherwise compute normal approximation
if N % 2: # N odd
mnAB = n * (N+1.0)**2 / 4.0 / N
varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2)
else:
mnAB = n * (N+2.0) / 4.0
varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0)
if repeats: # adjust variance estimates
# compute np.sum(tj * rj**2,axis=0)
fac = np.sum(symrank**2, axis=0)
if N % 2: # N odd
varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1))
else: # N even
varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1))
z = (AB - mnAB) / sqrt(varAB)
pval = distributions.norm.sf(abs(z)) * 2.0
return AnsariResult(AB, pval)
BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue'))
def bartlett(*args):
"""
Perform Bartlett's test for equal variances
Bartlett's test tests the null hypothesis that all input samples
are from populations with equal variances. For samples
from significantly non-normal populations, Levene's test
`levene` is more robust.
Parameters
----------
sample1, sample2,... : array_like
arrays of sample data. May be different lengths.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value of the test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
levene : A robust parametric test for equality of k variances
Notes
-----
Conover et al. (1981) examine many of the existing parametric and
nonparametric tests by extensive simulations and they conclude that the
tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be
    superior in terms of robustness to departures from normality and power [3]_.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
.. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical
Methods, Eighth Edition, Iowa State University Press.
.. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
Hypothesis Testing based on Quadratic Inference Function. Technical
Report #99-03, Center for Likelihood Studies, Pennsylvania State
University.
.. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical
Tests. Proceedings of the Royal Society of London. Series A,
Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282.
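    Examples
    --------
    An illustrative call on three arbitrary samples of measurements:
    >>> from scipy import stats
    >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99]
    >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05]
    >>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98]
    >>> statistic, pvalue = stats.bartlett(a, b, c)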
"""
# Handle empty input
for a in args:
if np.asanyarray(a).size == 0:
return BartlettResult(np.nan, np.nan)
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
ssq = zeros(k, 'd')
for j in range(k):
Ni[j] = len(args[j])
ssq[j] = np.var(args[j], ddof=1)
Ntot = np.sum(Ni, axis=0)
spsq = np.sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k))
numer = (Ntot*1.0 - k) * log(spsq) - np.sum((Ni - 1.0)*log(ssq), axis=0)
denom = 1.0 + 1.0/(3*(k - 1)) * ((np.sum(1.0/(Ni - 1.0), axis=0)) -
1.0/(Ntot - k))
T = numer / denom
pval = distributions.chi2.sf(T, k - 1) # 1 - cdf
return BartlettResult(T, pval)
LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue'))
def levene(*args, **kwds):
"""
Perform Levene test for equal variances.
The Levene test tests the null hypothesis that all input samples
are from populations with equal variances. Levene's test is an
alternative to Bartlett's test `bartlett` in the case where
there are significant deviations from normality.
Parameters
----------
sample1, sample2, ... : array_like
The sample data, possibly with different lengths
center : {'mean', 'median', 'trimmed'}, optional
Which function of the data to use in the test. The default
is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value for the test.
Notes
-----
Three variations of Levene's test are possible. The possibilities
and their recommended usages are:
      * 'median' : Recommended for skewed (non-normal) distributions.
* 'mean' : Recommended for symmetric, moderate-tailed distributions.
* 'trimmed' : Recommended for heavy-tailed distributions.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
.. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
Stanford University Press, pp. 278-292.
.. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
Statistical Association, 69, 364-367
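    Examples
    --------
    A sketch on two synthetic samples with unequal variances (the data are
    arbitrary; ``center='median'`` is the default, shown explicitly here):
    >>> from scipy import stats
    >>> np.random.seed(12345678)
    >>> x = stats.norm.rvs(scale=1.0, size=50)
    >>> y = stats.norm.rvs(scale=2.0, size=50)
    >>> statistic, pvalue = stats.levene(x, y, center='median')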
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("levene() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
Yci = zeros(k, 'd')
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
" or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(np.sort(arg), proportiontocut)
for arg in args)
func = lambda x: np.mean(x, axis=0)
for j in range(k):
Ni[j] = len(args[j])
Yci[j] = func(args[j])
Ntot = np.sum(Ni, axis=0)
# compute Zij's
Zij = [None] * k
for i in range(k):
Zij[i] = abs(asarray(args[i]) - Yci[i])
# compute Zbari
Zbari = zeros(k, 'd')
Zbar = 0.0
for i in range(k):
Zbari[i] = np.mean(Zij[i], axis=0)
Zbar += Zbari[i] * Ni[i]
Zbar /= Ntot
numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0)
# compute denom_variance
dvar = 0.0
for i in range(k):
dvar += np.sum((Zij[i] - Zbari[i])**2, axis=0)
denom = (k - 1.0) * dvar
W = numer / denom
pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf
return LeveneResult(W, pval)
@setastest(False)
def binom_test(x, n=None, p=0.5, alternative='two-sided'):
"""
Perform a test that the probability of success is p.
This is an exact, two-sided test of the null hypothesis
that the probability of success in a Bernoulli experiment
is `p`.
Parameters
----------
x : integer or array_like
the number of successes, or if x has length 2, it is the
number of successes and the number of failures.
n : integer
the number of trials. This is ignored if x gives both the
        number of successes and failures.
p : float, optional
The hypothesized probability of success. 0 <= p <= 1. The
default value is p = 0.5
alternative : {'two-sided', 'greater', 'less'}, optional
Indicates the alternative hypothesis. The default value is
'two-sided'.
Returns
-------
p-value : float
The p-value of the hypothesis test
References
----------
.. [1] http://en.wikipedia.org/wiki/Binomial_test
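    Examples
    --------
    An illustrative call: 7 successes observed in 20 trials, tested against
    a hypothesized success probability of 0.3 (the numbers are arbitrary):
    >>> from scipy import stats
    >>> pval = stats.binom_test(7, n=20, p=0.3)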
"""
x = atleast_1d(x).astype(np.integer)
if len(x) == 2:
n = x[1] + x[0]
x = x[0]
elif len(x) == 1:
x = x[0]
if n is None or n < x:
raise ValueError("n must be >= x")
n = np.int_(n)
else:
raise ValueError("Incorrect length for x.")
if (p > 1.0) or (p < 0.0):
raise ValueError("p must be in range [0,1]")
if alternative not in ('two-sided', 'less', 'greater'):
raise ValueError("alternative not recognized\n"
"should be 'two-sided', 'less' or 'greater'")
if alternative == 'less':
pval = distributions.binom.cdf(x, n, p)
return pval
if alternative == 'greater':
pval = distributions.binom.sf(x-1, n, p)
return pval
# if alternative was neither 'less' nor 'greater', then it's 'two-sided'
d = distributions.binom.pmf(x, n, p)
rerr = 1 + 1e-7
if x == p * n:
# special case as shortcut, would also be handled by `else` below
pval = 1.
elif x < p * n:
i = np.arange(np.ceil(p * n), n+1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(x, n, p) +
distributions.binom.sf(n - y, n, p))
else:
i = np.arange(np.floor(p*n) + 1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(y-1, n, p) +
distributions.binom.sf(x-1, n, p))
return min(1.0, pval)
def _apply_func(x, g, func):
# g is list of indices into x
# separating x into different groups
# func should be applied over the groups
g = unique(r_[0, g, len(x)])
output = []
for k in range(len(g) - 1):
output.append(func(x[g[k]:g[k+1]]))
return asarray(output)
FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue'))
def fligner(*args, **kwds):
"""
Perform Fligner-Killeen test for equality of variance.
Fligner's test tests the null hypothesis that all input samples
are from populations with equal variances. Fligner-Killeen's test is
distribution free when populations are identical [2]_.
Parameters
----------
sample1, sample2, ... : array_like
Arrays of sample data. Need not be the same length.
center : {'mean', 'median', 'trimmed'}, optional
Keyword argument controlling which function of the data is used in
computing the test statistic. The default is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value for the hypothesis test.
See Also
--------
bartlett : A parametric test for equality of k variances in normal samples
levene : A robust parametric test for equality of k variances
Notes
-----
As with Levene's test there are three variants of Fligner's test that
differ by the measure of central tendency used in the test. See `levene`
for more information.
Conover et al. (1981) examine many of the existing parametric and
nonparametric tests by extensive simulations and they conclude that the
tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be
    superior in terms of robustness to departures from normality and power [3]_.
References
----------
.. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps
.. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample
tests for scale. 'Journal of the American Statistical Association.'
71(353), 210-213.
.. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
Hypothesis Testing based on Quadratic Inference Function. Technical
Report #99-03, Center for Likelihood Studies, Pennsylvania State
University.
.. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A
comparative study of tests for homogeneity of variances, with
           applications to the outer continental shelf bidding data.
Technometrics, 23(4), 351-361.
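    Examples
    --------
    Usage mirrors `levene` and `bartlett`; the three synthetic samples
    below are given different scales on purpose:
    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> a = stats.norm.rvs(scale=1.0, size=50)
    >>> b = stats.norm.rvs(scale=2.0, size=50)
    >>> c = stats.norm.rvs(scale=3.0, size=50)
    >>> statistic, pvalue = stats.fligner(a, b, c)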
"""
# Handle empty input
for a in args:
if np.asanyarray(a).size == 0:
return FlignerResult(np.nan, np.nan)
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("fligner() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
" or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(arg, proportiontocut) for arg in args)
func = lambda x: np.mean(x, axis=0)
Ni = asarray([len(args[j]) for j in range(k)])
Yci = asarray([func(args[j]) for j in range(k)])
Ntot = np.sum(Ni, axis=0)
# compute Zij's
Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)]
allZij = []
g = [0]
for i in range(k):
allZij.extend(list(Zij[i]))
g.append(len(allZij))
ranks = stats.rankdata(allZij)
a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5)
# compute Aibar
Aibar = _apply_func(a, g, np.sum) / Ni
anbar = np.mean(a, axis=0)
varsq = np.var(a, axis=0, ddof=1)
Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq
pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf
return FlignerResult(Xsq, pval)
def mood(x, y, axis=0):
"""
Perform Mood's test for equal scale parameters.
Mood's two-sample test for scale parameters is a non-parametric
test for the null hypothesis that two samples are drawn from the
same distribution with the same scale parameter.
Parameters
----------
x, y : array_like
Arrays of sample data.
axis : int, optional
The axis along which the samples are tested. `x` and `y` can be of
different length along `axis`.
If `axis` is None, `x` and `y` are flattened and the test is done on
all values in the flattened arrays.
Returns
-------
z : scalar or ndarray
The z-score for the hypothesis test. For 1-D inputs a scalar is
returned.
p-value : scalar ndarray
The p-value for the hypothesis test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
ansari : A non-parametric test for the equality of 2 variances
bartlett : A parametric test for equality of k variances in normal samples
levene : A parametric test for equality of k variances
Notes
-----
The data are assumed to be drawn from probability distributions ``f(x)``
and ``f(x/s) / s`` respectively, for some probability density function f.
The null hypothesis is that ``s == 1``.
For multi-dimensional arrays, if the inputs are of shapes
``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
resulting z and p values will have shape ``(n0, n2, n3)``. Note that
``n1`` and ``m1`` don't have to be equal, but the other dimensions do.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(1234)
>>> x2 = np.random.randn(2, 45, 6, 7)
>>> x1 = np.random.randn(2, 30, 6, 7)
>>> z, p = stats.mood(x1, x2, axis=1)
>>> p.shape
(2, 6, 7)
Find the number of points where the difference in scale is not significant:
>>> (p > 0.1).sum()
74
Perform the test with different scales:
>>> x1 = np.random.randn(2, 30)
>>> x2 = np.random.randn(2, 35) * 10.0
>>> stats.mood(x1, x2, axis=1)
(array([-5.7178125 , -5.25342163]), array([ 1.07904114e-08, 1.49299218e-07]))
"""
x = np.asarray(x, dtype=float)
y = np.asarray(y, dtype=float)
if axis is None:
x = x.flatten()
y = y.flatten()
axis = 0
# Determine shape of the result arrays
res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
ax != axis])):
raise ValueError("Dimensions of x and y on all axes except `axis` "
"should match")
n = x.shape[axis]
m = y.shape[axis]
N = m + n
if N < 3:
raise ValueError("Not enough observations.")
xy = np.concatenate((x, y), axis=axis)
if axis != 0:
xy = np.rollaxis(xy, axis)
xy = xy.reshape(xy.shape[0], -1)
# Generalized to the n-dimensional case by adding the axis argument, and
# using for loops, since rankdata is not vectorized. For improving
# performance consider vectorizing rankdata function.
all_ranks = np.zeros_like(xy)
for j in range(xy.shape[1]):
all_ranks[:, j] = stats.rankdata(xy[:, j])
Ri = all_ranks[:n]
M = np.sum((Ri - (N + 1.0) / 2)**2, axis=0)
# Approx stat.
mnM = n * (N * N - 1.0) / 12
varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
z = (M - mnM) / sqrt(varM)
# sf for right tail, cdf for left tail. Factor 2 for two-sidedness
z_pos = z > 0
pval = np.zeros_like(z)
pval[z_pos] = 2 * distributions.norm.sf(z[z_pos])
pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos])
if res_shape == ():
# Return scalars, not 0-D arrays
z = z[0]
pval = pval[0]
else:
z.shape = res_shape
pval.shape = res_shape
return z, pval
WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue'))
def wilcoxon(x, y=None, zero_method="wilcox", correction=False):
"""
Calculate the Wilcoxon signed-rank test.
The Wilcoxon signed-rank test tests the null hypothesis that two
related paired samples come from the same distribution. In particular,
it tests whether the distribution of the differences x - y is symmetric
about zero. It is a non-parametric version of the paired T-test.
Parameters
----------
x : array_like
The first set of measurements.
y : array_like, optional
The second set of measurements. If `y` is not given, then the `x`
array is considered to be the differences between the two sets of
measurements.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt":
Pratt treatment: includes zero-differences in the ranking process
(more conservative)
"wilcox":
Wilcox treatment: discards all zero-differences
"zsplit":
            Zero rank split: just like Pratt, but splitting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the
z-statistic. Default is False.
Returns
-------
statistic : float
The sum of the ranks of the differences above or below zero, whichever
is smaller.
pvalue : float
The two-sided p-value for the test.
Notes
-----
Because the normal approximation is used for the calculations, the
samples used should be large. A typical rule is to require that
n > 20.
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
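    Examples
    --------
    A sketch on two paired synthetic samples of equal length (30 pairs, so
    the normal approximation noted above is only a rough guide):
    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> x = stats.norm.rvs(loc=0.5, size=30)
    >>> y = stats.norm.rvs(loc=0.0, size=30)
    >>> statistic, pvalue = stats.wilcoxon(x, y)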
"""
if zero_method not in ["wilcox", "pratt", "zsplit"]:
raise ValueError("Zero method should be either 'wilcox' "
"or 'pratt' or 'zsplit'")
if y is None:
d = asarray(x)
else:
x, y = map(asarray, (x, y))
if len(x) != len(y):
raise ValueError('Unequal N in wilcoxon. Aborting.')
d = x - y
if zero_method == "wilcox":
# Keep all non-zero differences
d = compress(np.not_equal(d, 0), d, axis=-1)
count = len(d)
if count < 10:
warnings.warn("Warning: sample size too small for normal approximation.")
r = stats.rankdata(abs(d))
r_plus = np.sum((d > 0) * r, axis=0)
r_minus = np.sum((d < 0) * r, axis=0)
if zero_method == "zsplit":
r_zero = np.sum((d == 0) * r, axis=0)
r_plus += r_zero / 2.
r_minus += r_zero / 2.
T = min(r_plus, r_minus)
mn = count * (count + 1.) * 0.25
se = count * (count + 1.) * (2. * count + 1.)
if zero_method == "pratt":
r = r[d != 0]
replist, repnum = find_repeats(r)
if repnum.size != 0:
# Correction for repeated elements.
se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
se = sqrt(se / 24)
correction = 0.5 * int(bool(correction)) * np.sign(T - mn)
z = (T - mn - correction) / se
prob = 2. * distributions.norm.sf(abs(z))
return WilcoxonResult(T, prob)
@setastest(False)
def median_test(*args, **kwds):
"""
Mood's median test.
Test that two or more samples come from populations with the same median.
Let ``n = len(args)`` be the number of samples. The "grand median" of
all the data is computed, and a contingency table is formed by
classifying the values in each sample as being above or below the grand
median. The contingency table, along with `correction` and `lambda_`,
are passed to `scipy.stats.chi2_contingency` to compute the test statistic
and p-value.
Parameters
----------
sample1, sample2, ... : array_like
The set of samples. There must be at least two samples.
Each sample must be a one-dimensional sequence containing at least
one value. The samples are not required to have the same length.
ties : str, optional
Determines how values equal to the grand median are classified in
the contingency table. The string must be one of::
"below":
Values equal to the grand median are counted as "below".
"above":
Values equal to the grand median are counted as "above".
"ignore":
Values equal to the grand median are not counted.
The default is "below".
correction : bool, optional
If True, *and* there are just two samples, apply Yates' correction
for continuity when computing the test statistic associated with
the contingency table. Default is True.
lambda_ : float or str, optional.
By default, the statistic computed in this test is Pearson's
chi-squared statistic. `lambda_` allows a statistic from the
Cressie-Read power divergence family to be used instead. See
`power_divergence` for details.
Default is 1 (Pearson's chi-squared statistic).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
stat : float
The test statistic. The statistic that is returned is determined by
`lambda_`. The default is Pearson's chi-squared statistic.
p : float
The p-value of the test.
m : float
The grand median.
table : ndarray
The contingency table. The shape of the table is (2, n), where
n is the number of samples. The first row holds the counts of the
values above the grand median, and the second row holds the counts
of the values below the grand median. The table allows further
analysis with, for example, `scipy.stats.chi2_contingency`, or with
`scipy.stats.fisher_exact` if there are two samples, without having
to recompute the table. If ``nan_policy`` is "propagate" and there
are nans in the input, the return value for ``table`` is ``None``.
See Also
--------
kruskal : Compute the Kruskal-Wallis H-test for independent samples.
mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.
Notes
-----
.. versionadded:: 0.15.0
References
----------
.. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
(1950), pp. 394-399.
.. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
See Sections 8.12 and 10.15.
Examples
--------
A biologist runs an experiment in which there are three groups of plants.
Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.
Each plant produces a number of seeds. The seed counts for each group
are::
Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49
Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99
Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84
The following code applies Mood's median test to these samples.
>>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
>>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
>>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
>>> from scipy.stats import median_test
>>> stat, p, med, tbl = median_test(g1, g2, g3)
The median is
>>> med
34.0
and the contingency table is
>>> tbl
array([[ 5, 10, 7],
[11, 5, 10]])
`p` is too large to conclude that the medians are not the same:
>>> p
0.12609082774093244
The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to
`median_test`.
>>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood")
>>> p
0.12224779737117837
The median occurs several times in the data, so we'll get a different
result if, for example, ``ties="above"`` is used:
>>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above")
>>> p
0.063873276069553273
>>> tbl
array([[ 5, 11, 9],
[11, 4, 8]])
This example demonstrates that if the data set is not large and there
are values equal to the median, the p-value can be sensitive to the
choice of `ties`.
"""
ties = kwds.pop('ties', 'below')
correction = kwds.pop('correction', True)
lambda_ = kwds.pop('lambda_', None)
nan_policy = kwds.pop('nan_policy', 'propagate')
if len(kwds) > 0:
        bad_kwd = list(kwds.keys())[0]
raise TypeError("median_test() got an unexpected keyword "
"argument %r" % bad_kwd)
if len(args) < 2:
raise ValueError('median_test requires two or more samples.')
ties_options = ['below', 'above', 'ignore']
if ties not in ties_options:
raise ValueError("invalid 'ties' option '%s'; 'ties' must be one "
"of: %s" % (ties, str(ties_options)[1:-1]))
data = [np.asarray(arg) for arg in args]
# Validate the sizes and shapes of the arguments.
for k, d in enumerate(data):
if d.size == 0:
raise ValueError("Sample %d is empty. All samples must "
"contain at least one value." % (k + 1))
if d.ndim != 1:
raise ValueError("Sample %d has %d dimensions. All "
"samples must be one-dimensional sequences." %
(k + 1, d.ndim))
cdata = np.concatenate(data)
contains_nan, nan_policy = _contains_nan(cdata, nan_policy)
if contains_nan and nan_policy == 'propagate':
return np.nan, np.nan, np.nan, None
if contains_nan:
grand_median = np.median(cdata[~np.isnan(cdata)])
else:
grand_median = np.median(cdata)
# When the minimum version of numpy supported by scipy is 1.9.0,
# the above if/else statement can be replaced by the single line:
# grand_median = np.nanmedian(cdata)
# Create the contingency table.
table = np.zeros((2, len(data)), dtype=np.int64)
for k, sample in enumerate(data):
sample = sample[~np.isnan(sample)]
nabove = count_nonzero(sample > grand_median)
nbelow = count_nonzero(sample < grand_median)
nequal = sample.size - (nabove + nbelow)
table[0, k] += nabove
table[1, k] += nbelow
if ties == "below":
table[1, k] += nequal
elif ties == "above":
table[0, k] += nequal
# Check that no row or column of the table is all zero.
# Such a table can not be given to chi2_contingency, because it would have
# a zero in the table of expected frequencies.
rowsums = table.sum(axis=1)
if rowsums[0] == 0:
raise ValueError("All values are below the grand median (%r)." %
grand_median)
if rowsums[1] == 0:
raise ValueError("All values are above the grand median (%r)." %
grand_median)
if ties == "ignore":
# We already checked that each sample has at least one value, but it
# is possible that all those values equal the grand median. If `ties`
# is "ignore", that would result in a column of zeros in `table`. We
# check for that case here.
zero_cols = np.where((table == 0).all(axis=0))[0]
if len(zero_cols) > 0:
msg = ("All values in sample %d are equal to the grand "
"median (%r), so they are ignored, resulting in an "
"empty sample." % (zero_cols[0] + 1, grand_median))
raise ValueError(msg)
stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
correction=correction)
return stat, p, grand_median, table
def _hermnorm(N):
# return the negatively normalized hermite polynomials up to order N-1
# (inclusive)
# using the recursive relationship
# p_n+1 = p_n(x)' - x*p_n(x)
# and p_0(x) = 1
plist = [None] * N
plist[0] = poly1d(1)
for n in range(1, N):
plist[n] = plist[n-1].deriv() - poly1d([1, 0]) * plist[n-1]
return plist
# Note: when removing pdf_fromgamma, also remove the _hermnorm support function
@np.deprecate(message="scipy.stats.pdf_fromgamma is deprecated in scipy 0.16.0 "
"in favour of statsmodels.distributions.ExpandedNormal.")
def pdf_fromgamma(g1, g2, g3=0.0, g4=None):
if g4 is None:
g4 = 3 * g2**2
sigsq = 1.0 / g2
sig = sqrt(sigsq)
mu = g1 * sig**3.0
p12 = _hermnorm(13)
for k in range(13):
p12[k] /= sig**k
# Add all of the terms to polynomial
totp = (p12[0] - g1/6.0*p12[3] +
g2/24.0*p12[4] + g1**2/72.0 * p12[6] -
g3/120.0*p12[5] - g1*g2/144.0*p12[7] - g1**3.0/1296.0*p12[9] +
g4/720*p12[6] + (g2**2/1152.0 + g1*g3/720)*p12[8] +
g1**2 * g2/1728.0*p12[10] + g1**4.0 / 31104.0*p12[12])
# Final normalization
totp = totp / sqrt(2*pi) / sig
def thefunc(x):
xn = (x - mu) / sig
return totp(xn) * exp(-xn**2 / 2.)
return thefunc
def _circfuncs_common(samples, high, low):
samples = np.asarray(samples)
if samples.size == 0:
return np.nan, np.nan
ang = (samples - low)*2*pi / (high - low)
return samples, ang
def circmean(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular mean for samples in a range.
Parameters
----------
samples : array_like
Input array.
high : float or int, optional
High boundary for circular mean range. Default is ``2*pi``.
low : float or int, optional
Low boundary for circular mean range. Default is 0.
axis : int, optional
Axis along which means are computed. The default is to compute
the mean of the flattened array.
Returns
-------
circmean : float
Circular mean.
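    Examples
    --------
    Angles just below and just above zero (modulo ``2*pi``) average to a
    value near zero rather than near ``pi``:
    >>> import numpy as np
    >>> from scipy import stats
    >>> m = stats.circmean([0.1, 2*np.pi - 0.1])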
"""
samples, ang = _circfuncs_common(samples, high, low)
S = sin(ang).sum(axis=axis)
C = cos(ang).sum(axis=axis)
res = arctan2(S, C)*(high - low)/2.0/pi + low
mask = (S == .0) * (C == .0)
if mask.ndim > 0:
res[mask] = np.nan
return res
def circvar(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular variance for samples assumed to be in a range
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular variance range. Default is 0.
high : float or int, optional
High boundary for circular variance range. Default is ``2*pi``.
axis : int, optional
Axis along which variances are computed. The default is to compute
the variance of the flattened array.
Returns
-------
circvar : float
Circular variance.
Notes
-----
This uses a definition of circular variance that in the limit of small
angles returns a number close to the 'linear' variance.
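    Examples
    --------
    Tightly clustered angles give a smaller circular variance than widely
    spread ones:
    >>> import numpy as np
    >>> from scipy import stats
    >>> small = stats.circvar(np.array([0.0, 0.1, 0.2]))
    >>> spread = stats.circvar(np.array([0.0, 2.0, 4.0]))
    >>> small < spread
    True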
"""
samples, ang = _circfuncs_common(samples, high, low)
S = sin(ang).mean(axis=axis)
C = cos(ang).mean(axis=axis)
R = hypot(S, C)
return ((high - low)/2.0/pi)**2 * 2 * log(1/R)
def circstd(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular standard deviation for samples assumed to be in the
range [low to high].
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular standard deviation range. Default is 0.
high : float or int, optional
High boundary for circular standard deviation range.
Default is ``2*pi``.
axis : int, optional
Axis along which standard deviations are computed. The default is
to compute the standard deviation of the flattened array.
Returns
-------
circstd : float
Circular standard deviation.
Notes
-----
This uses a definition of circular standard deviation that in the limit of
small angles returns a number close to the 'linear' standard deviation.
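    Examples
    --------
    For angles confined to a narrow arc the circular standard deviation is
    close to the ordinary (linear) standard deviation:
    >>> import numpy as np
    >>> from scipy import stats
    >>> x = np.array([0.10, 0.12, 0.15, 0.11, 0.13])
    >>> abs(stats.circstd(x) - np.std(x)) < 1e-3
    True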
"""
samples, ang = _circfuncs_common(samples, high, low)
S = sin(ang).mean(axis=axis)
C = cos(ang).mean(axis=axis)
R = hypot(S, C)
return ((high - low)/2.0/pi) * sqrt(-2*log(R))
# Tests to include (from R) -- some of these already in stats.
########
# X Ansari-Bradley
# X Bartlett (and Levene)
# X Binomial
# Y Pearson's Chi-squared (stats.chisquare)
# Y Association Between Paired samples (stats.pearsonr, stats.spearmanr)
# stats.kendalltau) -- these need work though
# Fisher's exact test
# X Fligner-Killeen Test
# Y Friedman Rank Sum (stats.friedmanchisquare?)
# Y Kruskal-Wallis
# Y Kolmogorov-Smirnov
# Cochran-Mantel-Haenszel Chi-Squared for Count
# McNemar's Chi-squared for Count
# X Mood Two-Sample
# X Test For Equal Means in One-Way Layout (see stats.ttest also)
# Pairwise Comparisons of proportions
# Pairwise t tests
# Tabulate p values for pairwise comparisons
# Pairwise Wilcoxon rank sum tests
# Power calculations two sample test of prop.
# Power calculations for one and two sample t tests
# Equal or Given Proportions
# Trend in Proportions
# Quade Test
# Y Student's T Test
# Y F Test to compare two variances
# XY Wilcoxon Rank Sum and Signed Rank Tests
| bsd-3-clause |
johannfaouzi/pyts | pyts/approximation/mcb.py | 1 | 8924 | """Code for Multiple Coefficient Binning."""
# Author: Johann Faouzi <[email protected]>
# License: BSD-3-Clause
import numpy as np
from numba import njit, prange
from scipy.stats import norm
from sklearn.base import BaseEstimator
from ..base import UnivariateTransformerMixin
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils.validation import check_array, check_is_fitted, check_X_y
from sklearn.utils.multiclass import check_classification_targets
@njit()
def _uniform_bins(timestamp_min, timestamp_max, n_timestamps, n_bins):
bin_edges = np.empty((n_timestamps, n_bins - 1))
for i in prange(n_timestamps):
bin_edges[i] = np.linspace(
timestamp_min[i], timestamp_max[i], n_bins + 1
)[1:-1]
return bin_edges
@njit()
def _digitize_1d(X, bins, n_samples, n_timestamps):
X_digit = np.empty((n_samples, n_timestamps))
for i in prange(n_timestamps):
X_digit[:, i] = np.digitize(X[:, i], bins, right=True)
return X_digit
@njit()
def _digitize_2d(X, bins, n_samples, n_timestamps):
X_digit = np.empty((n_samples, n_timestamps))
for i in prange(n_timestamps):
X_digit[:, i] = np.digitize(X[:, i], bins[i], right=True)
return X_digit
def _digitize(X, bins):
n_samples, n_timestamps = X.shape
if bins.ndim == 1:
X_binned = _digitize_1d(X, bins, n_samples, n_timestamps)
else:
X_binned = _digitize_2d(X, bins, n_samples, n_timestamps)
return X_binned.astype('int64')
class MultipleCoefficientBinning(BaseEstimator, UnivariateTransformerMixin):
"""Bin continuous data into intervals column-wise.
Parameters
----------
n_bins : int (default = 4)
The number of bins to produce. It must be between 2 and
``min(n_samples, 26)``.
strategy : str (default = 'quantile')
Strategy used to define the widths of the bins:
- 'uniform': All bins in each sample have identical widths
- 'quantile': All bins in each sample have the same number of points
- 'normal': Bin edges are quantiles from a standard normal distribution
- 'entropy': Bin edges are computed using information gain
alphabet : None, 'ordinal' or array-like, shape = (n_bins,)
Alphabet to use. If None, the first `n_bins` letters of the Latin
alphabet are used if `n_bins` is lower than 27, otherwise the alphabet
will be defined to [chr(i) for i in range(n_bins)]. If 'ordinal',
integers are used.
Attributes
----------
bin_edges_ : array, shape = (n_bins - 1,) or (n_timestamps, n_bins - 1)
Bin edges with shape = (n_bins - 1,) if ``strategy='normal'`` or
(n_timestamps, n_bins - 1) otherwise.
References
----------
.. [1] P. Schäfer, and M. Högqvist, "SFA: A Symbolic Fourier Approximation
and Index for Similarity Search in High Dimensional Datasets",
International Conference on Extending Database Technology,
15, 516-527 (2012).
Examples
--------
>>> from pyts.approximation import MultipleCoefficientBinning
>>> X = [[0, 4],
... [2, 7],
... [1, 6],
... [3, 5]]
>>> transformer = MultipleCoefficientBinning(n_bins=2)
>>> print(transformer.fit_transform(X))
[['a' 'a']
['b' 'b']
['a' 'b']
['b' 'a']]
"""
def __init__(self, n_bins=4, strategy='quantile', alphabet=None):
self.n_bins = n_bins
self.strategy = strategy
self.alphabet = alphabet
def fit(self, X, y=None):
"""Compute the bin edges for each feature.
Parameters
----------
X : array-like, shape = (n_samples, n_timestamps)
Data to transform.
y : None or array-like, shape = (n_samples,)
Class labels for each sample. Only used if ``strategy='entropy'``.
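        Returns
        -------
        self : object
        Examples
        --------
        A sketch of supervised binning (``strategy='entropy'`` requires
        class labels ``y``; the toy data below are arbitrary):
        >>> import numpy as np
        >>> from pyts.approximation import MultipleCoefficientBinning
        >>> X = np.array([[0., 4.], [2., 7.], [1., 6.], [3., 5.]])
        >>> y = np.array([0, 0, 1, 1])
        >>> mcb = MultipleCoefficientBinning(n_bins=2, strategy='entropy')
        >>> _ = mcb.fit(X, y)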
"""
if self.strategy == 'entropy':
if y is None:
raise ValueError("y cannot be None if strategy='entropy'.")
X, y = check_X_y(X, y, dtype='float64')
check_classification_targets(y)
else:
X = check_array(X, dtype='float64')
n_samples, n_timestamps = X.shape
self._n_timestamps_fit = n_timestamps
self._alphabet = self._check_params(n_samples)
self._check_constant(X)
self.bin_edges_ = self._compute_bins(
X, y, n_timestamps, self.n_bins, self.strategy)
return self
def transform(self, X):
"""Bin the data.
Parameters
----------
X : array-like, shape = (n_samples, n_timestamps)
Data to transform.
Returns
-------
X_new : array, shape = (n_samples, n_timestamps)
Binned data.
"""
check_is_fitted(self, 'bin_edges_')
X = check_array(X, dtype='float64')
self._check_consistent_lengths(X)
indices = _digitize(X, self.bin_edges_)
if isinstance(self._alphabet, str):
return indices
else:
return self._alphabet[indices]
def _check_params(self, n_samples):
if not isinstance(self.n_bins, (int, np.integer)):
raise TypeError("'n_bins' must be an integer.")
if not 2 <= self.n_bins <= min(n_samples, 26):
raise ValueError(
"'n_bins' must be greater than or equal to 2 and lower than "
"or equal to min(n_samples, 26) (got {0}).".format(self.n_bins)
)
if self.strategy not in ['uniform', 'quantile', 'normal', 'entropy']:
raise ValueError("'strategy' must be either 'uniform', 'quantile',"
" 'normal' or 'entropy' (got {0})."
.format(self.strategy))
if not ((self.alphabet is None)
or (self.alphabet == 'ordinal')
or (isinstance(self.alphabet, (list, tuple, np.ndarray)))):
raise TypeError("'alphabet' must be None, 'ordinal' or array-like "
"with shape (n_bins,) (got {0})."
.format(self.alphabet))
if self.alphabet is None:
alphabet = np.array([chr(i) for i in range(97, 97 + self.n_bins)])
elif self.alphabet == 'ordinal':
alphabet = 'ordinal'
else:
alphabet = check_array(self.alphabet, ensure_2d=False, dtype=None)
if alphabet.shape != (self.n_bins,):
raise ValueError("If 'alphabet' is array-like, its shape "
"must be equal to (n_bins,).")
return alphabet
def _check_constant(self, X):
if np.any(np.max(X, axis=0) - np.min(X, axis=0) == 0):
raise ValueError("At least one timestamp is constant.")
def _check_consistent_lengths(self, X):
if self._n_timestamps_fit != X.shape[1]:
raise ValueError(
"The number of timestamps in X must be the same as "
"the number of timestamps when `fit` was called "
"({0} != {1}).".format(self._n_timestamps_fit, X.shape[1])
)
def _compute_bins(self, X, y, n_timestamps, n_bins, strategy):
if strategy == 'normal':
bins_edges = norm.ppf(np.linspace(0, 1, self.n_bins + 1)[1:-1])
elif strategy == 'uniform':
timestamp_min, timestamp_max = np.min(X, axis=0), np.max(X, axis=0)
bins_edges = _uniform_bins(timestamp_min, timestamp_max,
n_timestamps, n_bins)
elif strategy == 'quantile':
bins_edges = np.percentile(
X, np.linspace(0, 100, self.n_bins + 1)[1:-1], axis=0
).T
if np.any(np.diff(bins_edges, axis=0) == 0):
raise ValueError(
"At least two consecutive quantiles are equal. "
"Consider trying with a smaller number of bins or "
"removing timestamps with low variation."
)
else:
bins_edges = self._entropy_bins(X, y, n_timestamps, n_bins)
return bins_edges
def _entropy_bins(self, X, y, n_timestamps, n_bins):
bins = np.empty((n_timestamps, n_bins - 1))
clf = DecisionTreeClassifier(criterion='entropy',
max_leaf_nodes=n_bins)
for i in range(n_timestamps):
clf.fit(X[:, i][:, None], y)
threshold = clf.tree_.threshold[clf.tree_.children_left != -1]
if threshold.size < (n_bins - 1):
raise ValueError(
"The number of bins is too high for timestamp {0}. "
"Consider trying with a smaller number of bins or "
"removing this timestamp.".format(i)
)
bins[i] = threshold
return np.sort(bins, axis=1)
| bsd-3-clause |
Myasuka/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 244 | 1593 | import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
iris = load_iris()
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(iris.data)
clf.set_params(penalty="l1")
clf.fit(X, iris.target)
X_new = clf.transform(X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, iris.target)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == iris.target), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
clf.fit(iris.data, iris.target)
assert_raises(ValueError, clf.transform, iris.data, "gobbledigook")
assert_raises(ValueError, clf.transform, iris.data, ".5 * gobbledigook")
| bsd-3-clause |
samzhang111/scikit-learn | examples/classification/plot_lda_qda.py | 78 | 5046 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
# filled Gaussian at 2 standard deviation
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# Linear Discriminant Analysis
lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# Quadratic Discriminant Analysis
qda = QuadraticDiscriminantAnalysis()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis')
plt.show()
| bsd-3-clause |
hdmetor/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 230 | 8281 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
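    Examples
    --------
    A small illustrative call (added as a sketch; two well-separated blobs
    give a coefficient close to 1):
    >>> import numpy as np
    >>> X = np.array([[0., 0.], [0., 1.], [10., 0.], [10., 1.]])
    >>> labels = np.array([0, 0, 1, 1])
    >>> silhouette_score(X, labels) > 0.85
    True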
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
        Silhouette Coefficient for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
return sil_samples
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
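# ---------------------------------------------------------------------------
# Worked illustration (added as a sketch, not part of the original module):
# trace the helpers above on a tiny hand-checkable distance matrix to show how
# ``a``, ``b`` and the final coefficient (b - a) / max(a, b) fit together.
if __name__ == "__main__":
    # Samples 0 and 1 form one tight cluster, samples 2 and 3 another.
    D = np.array([[0., 1., 10., 10.],
                  [1., 0., 10., 10.],
                  [10., 10., 0., 1.],
                  [10., 10., 1., 0.]])
    lbls = np.array([0, 0, 1, 1])
    a0 = _intra_cluster_distance(D[0], lbls, 0)    # 1.0: mean distance within cluster 0
    b0 = _nearest_cluster_distance(D[0], lbls, 0)  # 10.0: mean distance to cluster 1
    print((b0 - a0) / max(a0, b0))                 # 0.9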
| bsd-3-clause |
agutieda/QuantEcon.py | quantecon/models/solow/impulse_response.py | 7 | 10840 | """
Classes for generating and plotting impulse response functions.
@author : David R. Pugh
@date : 2014-10-06
"""
from __future__ import division
from textwrap import dedent
import matplotlib.pyplot as plt
import numpy as np
class ImpulseResponse(object):
"""Base class representing an impulse response function for a Model."""
# number of points to use for "padding"
N = 10
# length of impulse response
T = 100
def __init__(self, model):
"""
Create an instance of the ImpulseResponse class.
Parameters
----------
model : model.Model
Instance of the model.Model class representing a Solow model.
"""
self.model = model
def __repr__(self):
"""Machine readable summary of a ImpulseResponse instance."""
return self.__str__()
def __str__(self):
"""Human readable summary of a ImpulseResponse instance."""
m = """
Impulse response function (IRF):
- N (number of points used for padding) : {N:d}
- T (length of the impulse response) : {T:d}
"""
formatted_str = dedent(m.format(N=self.N, T=self.T))
return formatted_str
@property
def _padding(self):
"""
Impulse response functions are "padded" for pretty plotting.
:getter: Return the current "padding" values.
:type: numpy.ndarray
"""
return np.hstack((self._padding_time, self._padding_variables))
@property
def _padding_scaling_factor(self):
"""
Scaling factor used in constructing the impulse response function
"padding".
:getter: Return the current scaling factor.
:type: numpy.ndarray
"""
# extract the relevant parameters
A0 = self.model.params['A0']
L0 = self.model.params['L0']
g = self.model.params['g']
n = self.model.params['n']
if self.kind == 'per_capita':
factor = A0 * np.exp(g * self._padding_time)
elif self.kind == 'levels':
factor = A0 * L0 * np.exp((g + n) * self._padding_time)
else:
factor = np.ones(self.N)
return factor.reshape((self.N, 1))
@property
def _padding_time(self):
"""
The independent variable, time, is "padded" using values from -N to -1.
:getter: Return the current "padding" values.
:type: numpy.ndarray
"""
return np.linspace(-self.N, -1, self.N).reshape((self.N, 1))
@property
def _padding_variables(self):
"""
Impulse response functions for endogenous variables are "padded" with
N periods of steady state values.
:getter: Return current "padding" values.
        :type: numpy.ndarray
"""
        # economy is initially in steady state
k0 = self.model.steady_state
y0 = self.model.evaluate_intensive_output(k0)
c0 = self.model.evaluate_consumption(k0)
i0 = self.model.evaluate_actual_investment(k0)
        initial_condition = np.array([[k0, y0, c0, i0]])
        return self._padding_scaling_factor * initial_condition
@property
def _response(self):
"""
        Response functions combining the independent and endogenous variables.
:getter: Return the current response values.
:type: numpy.ndarray
"""
return np.hstack((self._response_time, self._response_variables))
@property
def _response_time(self):
"""
The independent variable, time, for the response ranges from 0 to T.
        :getter: Return the current response time values.
:type: numpy.ndarray
"""
return np.linspace(0, self.T, self.T + 1).reshape((self.T + 1, 1))
@property
def _response_variables(self):
"""
Response of endogenous variables to exogenous impulse.
:getter: Return the current response.
:type: numpy.ndarray
"""
        # economy is initially in steady state
k0 = self.model.steady_state
# apply the impulse...force validate params!
tmp_params = self.model.params.copy()
tmp_params.update(self.impulse)
self.model.params = tmp_params
# ...and generate the response
soln = self.model.ivp.solve(t0=0.0, y0=k0, h=1.0, T=self.T,
integrator='dop853')
# gather the results
k = soln[:, 1][:, np.newaxis]
y = self.model.evaluate_intensive_output(k)
c = self.model.evaluate_consumption(k)
i = self.model.evaluate_actual_investment(k)
return self._response_scaling_factor * np.hstack((k, y, c, i))
@property
def _response_scaling_factor(self):
"""
Scaling factor used in constructing the impulse response.
:getter: Return the current scaling factor.
:type: numpy.ndarray
"""
# extract the relevant parameters
A0 = self.model.params['A0']
L0 = self.model.params['L0']
g = self.model.params['g']
n = self.model.params['n']
if self.kind == 'per_capita':
factor = A0 * np.exp(g * self._response_time)
elif self.kind == 'levels':
factor = A0 * L0 * np.exp((g + n) * self._response_time)
else:
factor = np.ones(self.T + 1)
return factor.reshape((self.T + 1, 1))
@property
def impulse(self):
"""
Dictionary of new parameter values representing an impulse.
:getter: Return the current impulse dictionary.
:setter: Set a new impulse dictionary.
:type: dictionary
"""
return self._impulse
@property
def kind(self):
"""
The kind of impulse response function to generate. Must be one of:
'levels', 'per_capita', 'efficiency_units'.
:getter: Return the current kind of impulse responses.
:setter: Set a new value for the kind of impulse responses.
:type: str
"""
return self._kind
@property
def impulse_response(self):
"""
Impulse response functions generated by a shock to model parameter(s).
:getter: Return the current impulse response functions.
:type: numpy.ndarray
"""
orig_params = self.model.params.copy()
# create the irf
tmp_irf = np.vstack((self._padding, self._response))
# reset the model parameters
self.model.params = orig_params
return tmp_irf
@impulse.setter
def impulse(self, params):
"""Set a new impulse dictionary."""
self._impulse = self._validate_impulse(params)
@kind.setter
def kind(self, value):
"""Set a new value for the kind attribute."""
self._kind = self._validate_kind(value)
def _validate_impulse(self, params):
"""Validates the impulse attribute."""
if not isinstance(params, dict):
mesg = "ImpulseResponse.impulse must have type dict, not {}."
raise AttributeError(mesg.format(params.__class__))
elif not set(params.keys()) <= set(self.model.params.keys()):
mesg = "Invalid parameter included in the impulse dictionary."""
raise AttributeError(mesg)
else:
return params
@staticmethod
def _validate_kind(value):
"""Validates the kind attribute."""
valid_kinds = ['levels', 'per_capita', 'efficiency_units']
if value not in valid_kinds:
mesg = "The 'kind' attribute must be in {}."
raise AttributeError(mesg.format(valid_kinds))
else:
return value
def plot_impulse_response(self, ax, variable, log=False):
"""
Plot an impulse response function.
Parameters
----------
ax : `matplotlib.axes.AxesSubplot`
An instance of `matplotlib.axes.AxesSubplot`.
variable : str
Variable whose impulse response functions you wish to plot.
        impulse : dict
            Dictionary of new parameter values representing the impulse whose
            model response you wish to plot (read from the `impulse`
            attribute rather than passed to this method).
        kind : str (default='efficiency_units')
            Whether you want impulse response functions in 'levels',
            'per_capita', or 'efficiency_units' (read from the `kind`
            attribute rather than passed to this method).
log : boolean (default=False)
Whether or not to have logarithmic scales on the vertical axes.
Useful when plotting impulse response functions with
kind='per_capita' or kind='levels'.
Returns
-------
A list containing:
        irf_line : matplotlib.lines.Line2D
A Line2D object representing the impulse response for the requested
variable.
        bgp_line : matplotlib.lines.Line2D
A Line2D object representing the pre-impulse balanced growth path
for the model.
"""
# create a mapping from variables to column indices
irf = self.impulse_response
irf_dict = {'capital': irf[:, [0, 1]],
'output': irf[:, [0, 2]],
'consumption': irf[:, [0, 3]],
'investment': irf[:, [0, 4]],
}
# create the plot
traj = irf_dict[variable]
irf_line = ax.plot(traj[:, 0], traj[:, 1])
# add the old balanced growth path
g = self.model.params['g']
n = self.model.params['n']
t = self.N + traj[:, 0]
if self.kind == 'per_capita':
bgp_line = ax.plot(traj[:, 0], traj[0, 1] * np.exp(g * t), 'k--',
label='Original BGP')
ax.set_ylabel(variable.title() + ' (per capita)', fontsize=15,
family='serif')
elif self.kind == 'levels':
bgp_line = ax.plot(traj[:, 0], traj[0, 1] * np.exp((g + n) * t),
'k--', label='Original BGP')
ax.set_ylabel(variable.title(), fontsize=15, family='serif')
else:
bgp_line = ax.axhline(traj[0, 1], linestyle='dashed', color='k',
label='Original BGP')
ax.set_ylabel(variable.title() + ' (per unit effective labor)',
fontsize=15, family='serif')
# format axes, labels, title, legend, etc
ax.set_xlabel('Time', fontsize=15, family='serif')
ax.set_ylim(0.95 * traj[:, 1].min(), 1.05 * traj[:, 1].max())
if log is True:
ax.set_yscale('log')
ax.set_title('Impulse response function', fontsize=20, family='serif')
ax.grid('on')
ax.legend(loc=0, frameon=False, bbox_to_anchor=(1.0, 1.0),
prop={'family': 'serif'})
return [irf_line, bgp_line]
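# Usage sketch (added; not part of the original source). Constructing the
# Solow model itself happens elsewhere in this package, so that step is only
# indicated below; the attribute and method names are the ones defined in
# this class, while the shocked parameter value is purely hypothetical.
#
#     irf = ImpulseResponse(solow_model)   # solow_model: an existing Solow Model instance
#     irf.impulse = {'g': 0.03}            # hypothetical shock to a model parameter
#     irf.kind = 'per_capita'
#     fig, ax = plt.subplots(1, 1)
#     irf.plot_impulse_response(ax, 'capital', log=True)
#     plt.show()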
| bsd-3-clause |
AOSP-S4-KK/platform_external_chromium_org | chrome/test/nacl_test_injection/buildbot_chrome_nacl_stage.py | 26 | 11131 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Do all the steps required to build and test against nacl."""
import optparse
import os.path
import re
import shutil
import subprocess
import sys
import find_chrome
# Copied from buildbot/buildbot_lib.py
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
# Copied from buildbot/buildbot_lib.py
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
else:
try:
os.remove(path)
except Exception:
pass
else:
print 'Skipping %s' % path
# TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem.
def CleanTempDir():
# Only delete files and directories like:
# a) C:\temp\83C4.tmp
# b) /tmp/.org.chromium.Chromium.EQrEzl
file_name_re = re.compile(
r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
file_name_filter = lambda fn: file_name_re.search(fn) is not None
path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp'))
if len(path) >= 4 and os.path.isdir(path):
print
print "Cleaning out the temp directory."
print
TryToCleanContents(path, file_name_filter)
else:
print
print "Cannot find temp directory, not cleaning it."
print
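# Examples of what CleanTempDir's filter matches (added for clarity):
#   file_name_re.search(r'C:\temp\83C4.tmp')                   -> match, deleted
#   file_name_re.search('/tmp/.org.chromium.Chromium.EQrEzl')  -> match, deleted
#   file_name_re.search('/tmp/important.txt')                  -> None, skipped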
def RunCommand(cmd, cwd, env):
sys.stdout.write('\nRunning %s\n\n' % ' '.join(cmd))
sys.stdout.flush()
retcode = subprocess.call(cmd, cwd=cwd, env=env)
if retcode != 0:
sys.stdout.write('\nFailed: %s\n\n' % ' '.join(cmd))
sys.exit(retcode)
def RunTests(name, cmd, nacl_dir, env):
sys.stdout.write('\n\nBuilding files needed for %s testing...\n\n' % name)
RunCommand(cmd + ['do_not_run_tests=1', '-j8'], nacl_dir, env)
sys.stdout.write('\n\nRunning %s tests...\n\n' % name)
RunCommand(cmd, nacl_dir, env)
def BuildAndTest(options):
# Refuse to run under cygwin.
if sys.platform == 'cygwin':
raise Exception('I do not work under cygwin, sorry.')
  # By default, use the version of Python that is being used to run this script.
python = sys.executable
if sys.platform == 'darwin':
    # Mac 10.5 bots tend to use a particularly old version of Python, look for
# a newer version.
macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python'
if os.path.exists(macpython27):
python = macpython27
script_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(os.path.dirname(os.path.dirname(script_dir)))
nacl_dir = os.path.join(src_dir, 'native_client')
# Decide platform specifics.
if options.browser_path:
chrome_filename = options.browser_path
else:
chrome_filename = find_chrome.FindChrome(src_dir, [options.mode])
if chrome_filename is None:
    raise Exception('Cannot find a chrome binary - specify one with '
'--browser_path?')
env = dict(os.environ)
if sys.platform in ['win32', 'cygwin']:
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
bits = 64
else:
bits = 32
msvs_path = ';'.join([
r'c:\Program Files\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files\Microsoft Visual Studio 8\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC',
r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools',
])
env['PATH'] += ';' + msvs_path
scons = [python, 'scons.py']
elif sys.platform == 'darwin':
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
else:
p = subprocess.Popen(['file', chrome_filename], stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if p_stdout.find('executable x86_64') >= 0:
bits = 64
else:
bits = 32
scons = [python, 'scons.py']
else:
p = subprocess.Popen(
'uname -m | '
'sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/"',
shell=True, stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif p_stdout.find('64') >= 0:
bits = 64
else:
bits = 32
# xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap
# the entire build step rather than each test (browser_headless=1).
# We also need to make sure that there are at least 24 bits per pixel.
# https://code.google.com/p/chromium/issues/detail?id=316687
scons = [
'xvfb-run',
'--auto-servernum',
'--server-args', '-screen 0 1024x768x24',
python, 'scons.py',
]
if options.jobs > 1:
scons.append('-j%d' % options.jobs)
scons.append('disable_tests=%s' % options.disable_tests)
if options.buildbot is not None:
scons.append('buildbot=%s' % (options.buildbot,))
# Clean the output of the previous build.
# Incremental builds can get wedged in weird ways, so we're trading speed
# for reliability.
shutil.rmtree(os.path.join(nacl_dir, 'scons-out'), True)
# check that the HOST (not target) is 64bit
# this is emulating what msvs_env.bat is doing
if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
# 64bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 9.0\\Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 8.0\\Common7\\Tools\\')
else:
# 32bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 9.0\\'
'Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 8.0\\'
'Common7\\Tools\\')
# Run nacl/chrome integration tests.
# Note that we have to add nacl_irt_test to --mode in order to get
# inbrowser_test_runner to run.
# TODO(mseaborn): Change it so that inbrowser_test_runner is not a
# special case.
cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits,
'--mode=opt-host,nacl,nacl_irt_test',
'chrome_browser_path=%s' % chrome_filename,
]
if not options.integration_bot and not options.morenacl_bot:
cmd.append('disable_flaky_tests=1')
cmd.append('chrome_browser_tests')
# Propagate path to JSON output if present.
# Note that RunCommand calls sys.exit on errors, so potential errors
# from one command won't be overwritten by another one. Overwriting
# a successful results file with either success or failure is fine.
if options.json_build_results_output_file:
cmd.append('json_build_results_output_file=%s' %
options.json_build_results_output_file)
# Download the toolchain(s).
RunCommand([python,
os.path.join(nacl_dir, 'build', 'download_toolchains.py'),
'--no-arm-trusted', '--no-pnacl', 'TOOL_REVISIONS'],
nacl_dir, os.environ)
CleanTempDir()
if options.enable_newlib:
RunTests('nacl-newlib', cmd, nacl_dir, env)
if options.enable_glibc:
RunTests('nacl-glibc', cmd + ['--nacl_glibc'], nacl_dir, env)
def MakeCommandLineParser():
parser = optparse.OptionParser()
parser.add_option('-m', '--mode', dest='mode', default='Debug',
help='Debug/Release mode')
parser.add_option('-j', dest='jobs', default=1, type='int',
help='Number of parallel jobs')
parser.add_option('--enable_newlib', dest='enable_newlib', default=-1,
type='int', help='Run newlib tests?')
parser.add_option('--enable_glibc', dest='enable_glibc', default=-1,
type='int', help='Run glibc tests?')
parser.add_option('--json_build_results_output_file',
help='Path to a JSON file for machine-readable output.')
# Deprecated, but passed to us by a script in the Chrome repo.
# Replaced by --enable_glibc=0
parser.add_option('--disable_glibc', dest='disable_glibc',
action='store_true', default=False,
help='Do not test using glibc.')
parser.add_option('--disable_tests', dest='disable_tests',
type='string', default='',
help='Comma-separated list of tests to omit')
builder_name = os.environ.get('BUILDBOT_BUILDERNAME', '')
is_integration_bot = 'nacl-chrome' in builder_name
parser.add_option('--integration_bot', dest='integration_bot',
type='int', default=int(is_integration_bot),
help='Is this an integration bot?')
is_morenacl_bot = (
'More NaCl' in builder_name or
'naclmore' in builder_name)
parser.add_option('--morenacl_bot', dest='morenacl_bot',
type='int', default=int(is_morenacl_bot),
help='Is this a morenacl bot?')
# Not used on the bots, but handy for running the script manually.
parser.add_option('--bits', dest='bits', action='store',
type='int', default=None,
help='32/64')
parser.add_option('--browser_path', dest='browser_path', action='store',
type='string', default=None,
help='Path to the chrome browser.')
parser.add_option('--buildbot', dest='buildbot', action='store',
type='string', default=None,
help='Value passed to scons as buildbot= option.')
return parser
def Main():
parser = MakeCommandLineParser()
options, args = parser.parse_args()
if options.integration_bot and options.morenacl_bot:
parser.error('ERROR: cannot be both an integration bot and a morenacl bot')
# Set defaults for enabling newlib.
if options.enable_newlib == -1:
options.enable_newlib = 1
# Set defaults for enabling glibc.
if options.enable_glibc == -1:
if options.integration_bot or options.morenacl_bot:
options.enable_glibc = 1
else:
options.enable_glibc = 0
if args:
parser.error('ERROR: invalid argument')
BuildAndTest(options)
if __name__ == '__main__':
Main()
| bsd-3-clause |
mishless/LearningSystems | a3/ga.py | 1 | 10961 | # Genetic Algorithm for solving the Traveling Salesman problem
# Authors: Mihaela Stoycheva, Vukan Turkulov
# Includes
import configparser
import math
import matplotlib.pyplot as plt
import numpy
import random
import sys
from operator import itemgetter
# Global variables (yay!)
# Configuration variables (read from config.txt)
mutation_rate = 0;
population_size = 0;
elitism_rate = 0;
tournament_rate = 0;
max_iterations = 0;
input_file_name = "";
parent_rate = 0;
# General global variables
cities = {};
number_of_cities = 0;
parent_number = 0;
tournament_size = 0;
elite_number = 0;
crossover_number = 0;
def read_config():
global mutation_rate;
global elitism_rate;
global tournament_rate;
global population_size;
global input_file_name;
global max_iterations;
global parent_rate;
global parent_number;
global tournament_size;
global elite_number;
global crossover_number;
config = configparser.ConfigParser();
config.read("config.txt");
mutation_rate = float(config['general']['mutation_rate']);
population_size = int(config['general']['population_size']);
elitism_rate = float(config['general']['elitism_rate']);
tournament_rate = float(config['general']['tournament_rate']);
max_iterations = int(config['general']['max_iterations']);
parent_rate = float(config['general']['parent_rate']);
input_file_name = config['general']['input_file_name'];
parent_number = int(population_size * parent_rate);
elite_number = int(population_size * elitism_rate);
tournament_size = int(population_size * tournament_rate);
crossover_number = population_size - elite_number;
def print_config():
print("***** CONFIGURATION *****");
print_var("Population size", population_size);
print_var("Elitism rate", elitism_rate);
print_var("Tournament rate", tournament_rate);
print_var("Mutation rate", mutation_rate);
print_var("Parent rate", parent_rate);
print_var("Iteration number", max_iterations);
print("");
print_var("Tournament size", tournament_size);
print_var("Parent number", parent_number);
print_var("Elite number", elite_number);
print_var("Crossover number", crossover_number);
print("");
def read_input_file():
global number_of_cities;
file = open(input_file_name, "r");
file_lines = file.readlines();
file.close();
for file_line in file_lines:
temp = file_line.split();
cities[int(temp[0])] = {'x' : float(temp[1]), 'y' : float(temp[2])};
number_of_cities = len(cities);
def get_distance(city1, city2):
return math.sqrt( ((city1['x']-city2['x'])**2) +
((city1['y']-city2['y'])**2));
def print_cities():
print("***** CITIES *****");
for key, city in cities.items():
print("#" + "%2s" % str(key) + ": (" +
"%6s" % str(city['x']) + ', ' +
"%6s" % str(city['y']) + ')');
print("");
def print_var(name, var):
print(name + ":" + " "*(17-len(name)) + str(var));
def init():
read_config();
read_input_file();
print_config();
def create_random_individual():
individual = [];
# We must begin at first city
individual.append(1);
# Create list of city indexes
indexes = list(range(2,number_of_cities+1));
while len(indexes) > 0:
picked_index = random.choice(indexes);
indexes.remove(picked_index);
individual.append(picked_index);
# We must end at first city
individual.append(1);
return individual;
def print_population(population, name):
print("***** POPULATION: " + name + " *****");
print("Population size = " + str(len(population)));
i = 0;
for individual in population:
print("IND #" + str(i) + ": " + str(individual));
i += 1;
def print_population_2(population, name):
print("***** POPULATION: " + name + " *****");
print("Population size = " + str(len(population)));
i = 0;
for individual in population:
print("IND #" + str(i) + " distance = " +
str(evaluate_individual(individual)));
i += 1;
print("");
def print_population_3(population, name):
print("***** POPULATION: " + name + " *****");
print("Population size = " + str(len(population)));
for individual in population:
print(str(individual) + ": distance = " +
str(evaluate_individual(individual)));
print("");
def create_random_population(population_size):
population = [];
for i in range(0, population_size):
population.append(create_random_individual());
return population;
def evaluate_individual(individual):
distance_traveled = 0;
for i in range(0, len(individual)-1):
distance_traveled = (distance_traveled +
get_distance(cities[individual[i]], cities[individual[i+1]]));
return distance_traveled;
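# Worked example (added): with cities = {1: {'x': 0, 'y': 0}, 2: {'x': 3, 'y': 4}}
# the tour [1, 2, 1] travels 5 units out and 5 units back, so
# evaluate_individual([1, 2, 1]) returns 10.0.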
def evaluate_population(population):
evaluations = [];
for individual in population:
evaluations.append((evaluate_individual(individual), individual));
return evaluations;
def select_tournament_pool(data):
tournament_pool = [];
indexes = list(range(0, len(data)));
for i in range(0, tournament_size):
chosen_index = random.choice(indexes);
tournament_pool.append(data[chosen_index]);
indexes.remove(chosen_index);
return tournament_pool;
def best_solution(pool):
best_individual = {'eval' : sys.float_info.max};
for individual in pool:
if individual['eval'] < best_individual['eval']:
best_individual = individual;
return best_individual;
def run_tournament(pool):
return best_solution(pool);
def merge_popul_and_eval(population, evaluations):
data = [];
for i in range(0, len(population)):
data.append({'ind' : population[i],
'eval' : evaluations[i]});
return data;
def select_parent_pool(population, evaluations):
parent_pool = [];
data = merge_popul_and_eval(population, evaluations);
for i in range(0, parent_number):
tournament_pool = select_tournament_pool(data);
parent = run_tournament(tournament_pool);
parent_pool.append(parent['ind']);
data.remove(parent);
return parent_pool;
def is_individual_valid(individual):
if(len(individual) != (number_of_cities+1)):
print("INVALID " + str(individual));
return False;
if(individual[0] != 1):
print("INVALID " + str(individual));
return False;
if(individual[-1] != 1):
print("INVALID " + str(individual));
return False;
for city in individual:
if city == 1:
if individual.count(city) != 2:
print("INVALID " + str(individual));
return False;
else:
if individual.count(city) != 1:
print("INVALID " + str(individual));
return False;
return True;
def is_population_valid(population):
for individual in population:
if is_individual_valid(individual) == False:
return False;
return True;
def create_child(parent1, parent2):
l = len(parent1);
x = random.randint(1, l-1);
y = random.randint(x, l-1);
child = [];
extract = parent1[x:y];
"""print_var("P1", parent1);
print_var("P2", parent2);
print_var("x", x);
print_var("y", y);
print_var("Extract", extract);"""
i = 0;
for j in range(0, x):
while(parent2[i] in extract):
i += 1;
child.append(parent2[i]);
i += 1;
child.extend(extract);
for j in range(y, l):
while(parent2[i] in extract):
i += 1;
child.append(parent2[i]);
i += 1;
return child;
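# Worked example (added): with parent1 = [1, 3, 4, 5, 2, 1],
# parent2 = [1, 5, 2, 3, 4, 1] and cut points x = 2, y = 4, the extract is
# [4, 5]; the remaining slots are filled from parent2 in order while skipping
# extracted cities, giving child = [1, 2, 4, 5, 3, 1] -- a valid tour that
# still starts and ends at city 1.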
def generate_children(parent_pool, child_num):
children = [];
for i in range(0, child_num):
parent1 = random.choice(parent_pool);
parent_pool.remove(parent1);
parent2 = random.choice(parent_pool);
parent_pool.append(parent1);
new_child = create_child(parent1, parent2);
children.append(new_child);
return children;
def generate_elites(population, evaluations, number):
data = merge_popul_and_eval(population, evaluations);
elites = [];
for i in range(0, number):
best = best_solution(data);
elites.append(best['ind']);
data.remove(best);
return elites;
def mutate_individual(individual):
i = random.randint(1, len(individual)-2);
j = i;
while j == i:
j = random.randint(1, len(individual)-2);
individual[i], individual[j] = individual[j], individual[i];
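# Worked example (added): only interior positions are swapped, so the fixed
# start/end city 1 is never moved; e.g. swapping indices 1 and 3 turns
# [1, 3, 4, 5, 2, 1] into [1, 5, 4, 3, 2, 1].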
def mutate_population(population):
for individual in population:
if random.random() < mutation_rate:
mutate_individual(individual);
def test_stuff():
"""
p1 = "abcdefg";
p2 = "1234567";
for i in range(0,10):
print(create_child(p1,p2));
ind = [1,2,3,4,5,6];
print("Before", ind);
mutate_individual(ind);
print("After", ind);
exit();"""
def perform_GA():
best_solutions = [];
best_individuals = [];
best_solution = None;
#print("***** ALGORITHM START *****");
population = create_random_population(population_size);
iteration_counter = 1;
while True:
#print("Running iteration " + str(iteration_counter) + ":");
evaluations = evaluate_population(population);
best_solution = min(evaluations, key=lambda evaluation:evaluation[0])
best_solutions.append(best_solution[0]);
best_individuals.append(best_solution[1]);
evaluations = [evaluation[0] for evaluation in evaluations]
if iteration_counter == max_iterations:
break;
parent_pool = select_parent_pool(population, evaluations);
children = generate_children(parent_pool, crossover_number);
mutate_population(children);
elites = generate_elites(population, evaluations, elite_number);
# Prepare population for the next iteration
population = children + elites;
iteration_counter += 1;
if is_population_valid(population) == False:
break;
return (best_solutions, best_individuals);
def do_what_needs_to_be_done():
results = [];
bests = [];
print("***** ALGORITHM START *****");
sys.stdout.flush()
for i in range(0, 10):
print("Starting cycle " + str(i+1));
results.append(perform_GA());
bests.append((results[i][0][-1], results[i][1][-1]));
best_ind = bests.index(min(bests, key=lambda best:best[0]));
    print("Best run index: " + str(best_ind));
    print("***** RESULTS *****");
    print("Best distance is " + str(bests[best_ind][0]));
    print("Best route is " + str(bests[best_ind][1]));
plt.plot(results[best_ind][0]);
plt.show();
#main
init();
do_what_needs_to_be_done()
| mit |
tosolveit/scikit-learn | sklearn/metrics/tests/test_ranking.py | 127 | 40813 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
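# Worked example (added): for y_true = [0, 0, 1, 1] and
# y_score = [0.1, 0.4, 0.35, 0.8], the positive scores are {0.35, 0.8} and the
# negative scores are {0.1, 0.4}; 3 of the 4 positive/negative pairs are
# ordered correctly, so _auc returns 3 / 4 = 0.75.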
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
            # i.e., the percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
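# Worked example (added): for y_true = [0, 0, 1, 1] and
# y_score = [0.1, 0.4, 0.35, 0.8], the descending ranking is
# 0.8 (pos), 0.4 (neg), 0.35 (pos), 0.1 (neg); precision is 1/1 at the first
# positive and 2/3 at the second, so the score is (1 + 2/3) / 2, about 0.83.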
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve that starts at 0 and ends at 1,
    # even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check on several small example that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
    # Raise a ValueError if the input is not in an appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that label ranking average precision works for various cases
# Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account,
        # e.g. two labels tied at rank 1 both get rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Count the number of relevant labels ranked at least as well
            # (smaller or equal corrected rank), including this label itself.
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
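# Illustrative aside (added for clarity, not part of the original test suite):
# a worked instance of the tie-corrected ranking in _my_lrap above. For
# y_true = [[1, 0, 1]] and y_score = [[0.25, 0.5, 0.75]] the relevant labels
# get ranks 1 and 3, so the sample score is (1 / 1 + 2 / 3) / 2 = 5 / 6,
# matching the corresponding assertion in check_lrap_toy.
def example_my_lrap_worked_case():
    expected = (1 / 1 + 2 / 3) / 2
    assert_almost_equal(_my_lrap([[1, 0, 1]], [[0.25, 0.5, 0.75]]), expected)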
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
yonglehou/scikit-learn | examples/model_selection/randomized_search.py | 201 | 3214 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The result in parameter settings is quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
TheWylieStCoyote/gnuradio | gr-digital/examples/berawgn.py | 3 | 4409 | #!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
"""
BER simulation for QPSK signals, compare to theoretical values.
Change the N_BITS value to simulate more bits per Eb/N0 value,
thus allowing you to check for lower BER values.
Lower values will run faster; higher values will use a lot of RAM.
Also, this app isn't highly optimized--the flow graph is completely
reinstantiated for every Eb/N0 value.
Of course, expect the maximum value for BER to be one order of
magnitude below what you chose for N_BITS.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import math
import numpy
from gnuradio import gr, digital
from gnuradio import analog
from gnuradio import blocks
import sys
try:
from scipy.special import erfc
except ImportError:
print("Error: could not import scipy (http://www.scipy.org/)")
sys.exit(1)
try:
from matplotlib import pyplot
except ImportError:
print("Error: could not from matplotlib import pyplot (http://matplotlib.sourceforge.net/)")
sys.exit(1)
# Best to choose powers of 10
N_BITS = 1e7
RAND_SEED = 42
def berawgn(EbN0):
""" Calculates theoretical bit error rate in AWGN (for BPSK and given Eb/N0) """
return 0.5 * erfc(math.sqrt(10**(float(EbN0) / 10)))
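# Illustrative aside (not part of the original example): the closed form above
# gives e.g. berawgn(0) ~ 7.9e-2 and berawgn(10) ~ 3.9e-6, which is why N_BITS
# has to be much larger than 1/BER to observe any errors at the high-SNR end
# of the sweep.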
class BitErrors(gr.hier_block2):
""" Two inputs: true and received bits. We compare them and
add up the number of incorrect bits. Because integrate_ff()
can only add up a certain number of values, the output is
not a scalar, but a sequence of values, the sum of which is
the BER. """
def __init__(self, bits_per_byte):
gr.hier_block2.__init__(self, "BitErrors",
gr.io_signature(2, 2, gr.sizeof_char),
gr.io_signature(1, 1, gr.sizeof_int))
# Bit comparison
comp = blocks.xor_bb()
intdump_decim = 100000
if N_BITS < intdump_decim:
intdump_decim = int(N_BITS)
self.connect(self,
comp,
blocks.unpack_k_bits_bb(bits_per_byte),
blocks.uchar_to_float(),
blocks.integrate_ff(intdump_decim),
blocks.multiply_const_ff(1.0 / N_BITS),
self)
self.connect((self, 1), (comp, 1))
class BERAWGNSimu(gr.top_block):
" This contains the simulation flow graph "
def __init__(self, EbN0):
gr.top_block.__init__(self)
self.const = digital.qpsk_constellation()
# Source is N_BITS bits, non-repeated
        data = list(map(int, numpy.random.randint(0, self.const.arity(), int(N_BITS / self.const.bits_per_symbol()))))
src = blocks.vector_source_b(data, False)
mod = digital.chunks_to_symbols_bc((self.const.points()), 1)
add = blocks.add_vcc()
noise = analog.noise_source_c(analog.GR_GAUSSIAN,
self.EbN0_to_noise_voltage(EbN0),
RAND_SEED)
demod = digital.constellation_decoder_cb(self.const.base())
ber = BitErrors(self.const.bits_per_symbol())
self.sink = blocks.vector_sink_f()
self.connect(src, mod, add, demod, ber, self.sink)
self.connect(noise, (add, 1))
self.connect(src, (ber, 1))
def EbN0_to_noise_voltage(self, EbN0):
""" Converts Eb/N0 to a complex noise voltage (assuming unit symbol power) """
        return 1.0 / math.sqrt(self.const.bits_per_symbol() * 10**(float(EbN0) / 10))
def simulate_ber(EbN0):
""" All the work's done here: create flow graph, run, read out BER """
print("Eb/N0 = %d dB" % EbN0)
fg = BERAWGNSimu(EbN0)
fg.run()
return numpy.sum(fg.sink.data())
if __name__ == "__main__":
EbN0_min = 0
EbN0_max = 15
EbN0_range = list(range(EbN0_min, EbN0_max+1))
ber_theory = [berawgn(x) for x in EbN0_range]
print("Simulating...")
ber_simu = [simulate_ber(x) for x in EbN0_range]
f = pyplot.figure()
s = f.add_subplot(1,1,1)
s.semilogy(EbN0_range, ber_theory, 'g-.', label="Theoretical")
s.semilogy(EbN0_range, ber_simu, 'b-o', label="Simulated")
s.set_title('BER Simulation')
s.set_xlabel('Eb/N0 (dB)')
s.set_ylabel('BER')
s.legend()
s.grid()
pyplot.show()
| gpl-3.0 |
xingjiepan/cylinder_fitting | cylinder_fitting/visualize.py | 2 | 2006 | import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from .geometry import rotation_matrix_from_axis_and_angle
from . import fitting
def show_G_distribution(data):
'''Show the distribution of the G function.'''
Xs, t = fitting.preprocess_data(data)
Theta, Phi = np.meshgrid(np.linspace(0, np.pi, 50), np.linspace(0, 2 * np.pi, 50))
G = []
for i in range(len(Theta)):
G.append([])
for j in range(len(Theta[i])):
w = fitting.direction(Theta[i][j], Phi[i][j])
G[-1].append(fitting.G(w, Xs))
plt.imshow(G, extent=[0, np.pi, 0, 2 * np.pi], origin='lower')
plt.show()
def show_fit(w_fit, C_fit, r_fit, Xs):
'''Plot the fitting given the fitted axis direction, the fitted
center, the fitted radius and the data points.
'''
fig = plt.figure()
ax = fig.gca(projection='3d')
# Plot the data points
ax.scatter([X[0] for X in Xs], [X[1] for X in Xs], [X[2] for X in Xs])
# Get the transformation matrix
theta = np.arccos(np.dot(w_fit, np.array([0, 0, 1])))
phi = np.arctan2(w_fit[1], w_fit[0])
M = np.dot(rotation_matrix_from_axis_and_angle(np.array([0, 0, 1]), phi),
rotation_matrix_from_axis_and_angle(np.array([0, 1, 0]), theta))
# Plot the cylinder surface
delta = np.linspace(-np.pi, np.pi, 20)
z = np.linspace(-10, 10, 20)
Delta, Z = np.meshgrid(delta, z)
X = r_fit * np.cos(Delta)
Y = r_fit * np.sin(Delta)
for i in range(len(X)):
for j in range(len(X[i])):
p = np.dot(M, np.array([X[i][j], Y[i][j], Z[i][j]])) + C_fit
X[i][j] = p[0]
Y[i][j] = p[1]
Z[i][j] = p[2]
ax.plot_surface(X, Y, Z, alpha=0.2)
# Plot the center and direction
ax.quiver(C_fit[0], C_fit[1], C_fit[2],
r_fit * w_fit[0], r_fit * w_fit[1], r_fit * w_fit[2], color='red')
plt.show()
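def example_show_fit():
    '''Minimal usage sketch (illustrative addition, not part of the original
    module): plot a synthetic unit-radius cylinder along the z axis together
    with points sampled on its surface.'''
    Xs = [np.array([np.cos(t), np.sin(t), z])
          for z in (-5., 0., 5.)
          for t in np.linspace(0., 2. * np.pi, 20)]
    show_fit(np.array([0., 0., 1.]), np.zeros(3), 1.0, Xs)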
| bsd-3-clause |
Barmaley-exe/scikit-learn | sklearn/decomposition/nmf.py | 24 | 19057 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
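# For example, norm(np.array([3., 4.])) gives 5.0, the same value as
# np.linalg.norm, but computed from a single dot product (see the link above).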
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
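# For example, _sparseness(np.array([0., 0., 0., 1.])) is 1.0 (maximally
# sparse) while _sparseness(np.ones(4)) is 0.0 (fully dense).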
def check_non_negative(X, whom):
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
    n_components : int
        The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
    eps : float
        Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
U, S, V = randomized_svd(X, n_components)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
random_state = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
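# Minimal usage sketch (illustrative, not part of the original module):
#
#     X = np.abs(check_random_state(0).randn(10, 6))
#     W0, H0 = _initialize_nmf(X, n_components=3, variant='a')
#     # W0 and H0 are non-negative and np.dot(W0, H0) roughly approximates X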
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
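# Minimal usage sketch (illustrative, not part of the original module): with
# V built exactly as np.dot(W, H_true) from non-negative factors, the solver
# recovers a non-negative H close to H_true from a feasible starting point:
#
#     rng = check_random_state(0)
#     W = np.abs(rng.randn(6, 3))
#     H_true = np.abs(rng.randn(3, 4))
#     H, grad, n_iter = _nls_subproblem(np.dot(W, H_true), W, np.ones((3, 4)),
#                                       tol=1e-8, max_iter=200)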
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvdar' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
        Degree of correctness to maintain, if sparseness is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
random_state = self.random_state
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a')
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar')
elif init == "random":
rng = check_random_state(random_state)
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
| bsd-3-clause |
cbecker/iiboost | python/tests/python_channels_test_class.py | 1 | 2632 | ###################################################################################
# Test for the IIBoost wrapper class
###################################################################################
from iiboost import Booster, EigenVectorsOfHessianImage, computeEigenVectorsOfHessianImage, computeIntegralImage, ROICoordinates
from sklearn.externals import joblib # to load data
import numpy as np
# to show something
import matplotlib.pyplot as plt
# load data
gt = joblib.load("../../testData/gt.jlb")
img = joblib.load("../../testData/img.jlb")
# let's pretend we have 3 image stacks with different numbers of ROIs,
# each with its corresponding gt and 2 feature channels
img3 = img2 = img1 = img
gt3 = gt2 = gt1 = gt
model = Booster()
imgFloat = np.float32(img)
iiImage = computeIntegralImage( imgFloat )
# again, this is just for illustration: presume the second channel is a different feature
channel1 = iiImage
channel2 = iiImage
channels3 = channels2 = channels1 = [channel1,channel2]
# anisotropy factor is the ratio between z voxel size and x/y voxel size.
# if Isotropic -> 1.0
zAnisotropyFactor = 1.0;
# this is typically a good value, but it depends on the voxel size of the data
hessianSigma = 3.5
eigV1 = computeEigenVectorsOfHessianImage( img1, zAnisotropyFactor, hessianSigma )
eigV2 = computeEigenVectorsOfHessianImage( img2, zAnisotropyFactor, hessianSigma )
eigV3 = computeEigenVectorsOfHessianImage( img3, zAnisotropyFactor, hessianSigma )
# Train: note that we pass a list of stacks
model.trainWithChannels( [img1,img2,img3], [eigV1, eigV2, eigV3], [gt1,gt2,gt3], [channels1,channels2,channels3],
zAnisotropyFactor, numStumps=100, gtNegativeLabel=1, gtPositiveLabel=2, debugOutput=True)
pred = model.predictWithChannels( img, eigV1, channels1, zAnisotropyFactor, useEarlyStopping=True)
roi = ROICoordinates()
roi.x2 = img.shape[2] - 1
roi.y2 = img.shape[1] - 1
roi.z1 = roi.z2 = img.shape[0] / 2
predSingleSlice = model.predictWithChannels( img, eigV1, channels1, zAnisotropyFactor, useEarlyStopping=True, subROI=roi)
# show image & prediction side by side
plt.ion()
plt.figure()
plt.subplot(1,3,1)
plt.imshow(img[roi.z1,:,:],cmap="gray")
plt.title("Click on the image to exit")
plt.subplot(1,3,2)
plt.imshow(pred[roi.z1,:,:],cmap="gray")
plt.title("Click on the image to exit")
plt.subplot(1,3,3)
plt.imshow(predSingleSlice[roi.z1,:,:],cmap="gray")
plt.title("Click on the image to exit")
plt.ginput(1)
print "Serializing..."
ss = model.serialize()
print "Deserializing..."
model.deserialize( ss )
print "DONE."
#print "Serialization string: " + model.serialize()
| gpl-3.0 |
fedspendingtransparency/data-act-broker-backend | tests/unit/dataactbroker/test_duns.py | 1 | 18388 | import os
import datetime
import pandas as pd
from dataactcore.config import CONFIG_BROKER
from dataactcore.scripts import load_duns_exec_comp
from dataactcore.models.domainModels import DUNS
from dataactcore.utils.duns import DUNS_COLUMNS, EXCLUDE_FROM_API
def mock_get_duns_props_from_sam(duns_list):
""" Mock function for get_duns_props as we can't connect to the SAM service """
request_cols = [col for col in DUNS_COLUMNS if col not in EXCLUDE_FROM_API]
columns = request_cols
results = pd.DataFrame(columns=columns)
duns_mappings = {
'000000001': {
'awardee_or_recipient_uniqu': '000000001',
'uei': 'A1',
'legal_business_name': 'Legal Name 1',
'dba_name': 'Name 1',
'entity_structure': '1A',
'ultimate_parent_unique_ide': '000000004',
'ultimate_parent_uei': 'D4',
'ultimate_parent_legal_enti': 'Parent Legal Name 1',
'address_line_1': 'Test address 1',
'address_line_2': 'Test address 2',
'city': 'Test city',
'state': 'Test state',
'zip': 'Test zip',
'zip4': 'Test zip4',
'country_code': 'Test country',
'congressional_district': 'Test congressional district',
'business_types_codes': [['A', 'B', 'C']],
'business_types': [['Name A', 'Name B', 'Name C']],
'high_comp_officer1_full_na': 'Test Exec 1',
'high_comp_officer1_amount': '1',
'high_comp_officer2_full_na': 'Test Exec 2',
'high_comp_officer2_amount': '2',
'high_comp_officer3_full_na': 'Test Exec 3',
'high_comp_officer3_amount': '3',
'high_comp_officer4_full_na': 'Test Exec 4',
'high_comp_officer4_amount': '4',
'high_comp_officer5_full_na': 'Test Exec 5',
'high_comp_officer5_amount': '5'
},
'000000005': {
'awardee_or_recipient_uniqu': '000000005',
'uei': 'E5',
'legal_business_name': 'Legal Name 2',
'dba_name': 'Name 2',
'entity_structure': '2B',
'ultimate_parent_unique_ide': None,
'ultimate_parent_uei': None,
'ultimate_parent_legal_enti': None,
'address_line_1': 'Other Test address 1',
'address_line_2': 'Other Test address 2',
'city': 'Other Test city',
'state': 'Other Test state',
'zip': 'Other Test zip',
'zip4': 'Other Test zip4',
'country_code': 'Other Test country',
'congressional_district': 'Other Test congressional district',
'business_types_codes': [['D', 'E', 'F']],
'business_types': [['Name D', 'Name E', 'Name F']],
'high_comp_officer1_full_na': 'Test Other Exec 6',
'high_comp_officer1_amount': '6',
'high_comp_officer2_full_na': 'Test Other Exec 7',
'high_comp_officer2_amount': '7',
'high_comp_officer3_full_na': 'Test Other Exec 8',
'high_comp_officer3_amount': '8',
'high_comp_officer4_full_na': 'Test Other Exec 9',
'high_comp_officer4_amount': '9',
'high_comp_officer5_full_na': 'Test Other Exec 10',
'high_comp_officer5_amount': '10'
}
}
for duns in duns_list:
if duns in duns_mappings:
results = results.append(pd.DataFrame(dict([(k, pd.Series(v)) for k, v in duns_mappings[duns].items()])),
sort=True)
return results
def test_load_duns(database, monkeypatch):
""" Test a local run load duns with the test files """
monkeypatch.setattr('dataactcore.utils.duns.get_duns_props_from_sam', mock_get_duns_props_from_sam)
sess = database.session
duns_dir = os.path.join(CONFIG_BROKER['path'], 'tests', 'unit', 'data', 'fake_sam_files', 'duns')
load_duns_exec_comp.load_from_sam('DUNS', sess, True, duns_dir)
# update if the fake DUNS file name/zip changes
deactivation_date = '2021-02-06'
expected_results = {
# Pulled active monthly record, slightly updated with deactivation date as sam_extract = 1
'000000001': {
'uei': 'A1',
'awardee_or_recipient_uniqu': '000000001',
'registration_date': '1999-01-01',
'activation_date': '1999-01-02',
'expiration_date': '1999-01-25',
'last_sam_mod_date': '1999-01-15',
'deactivation_date': deactivation_date,
'legal_business_name': 'LEGAL BUSINESS NAME 000000001 V1 MONTHLY',
'address_line_1': 'ADDRESS LINE 1 000000001 V1 MONTHLY',
'address_line_2': 'ADDRESS LINE 2 000000001 V1 MONTHLY',
'city': 'CITY 000000001 V1 MONTHLY',
'state': 'ST 000000001 V1 MONTHLY',
'zip': 'ZIP 000000001 V1 MONTHLY',
'zip4': 'ZIP4 000000001 V1 MONTHLY',
'country_code': 'COUNTRY 000000001 V1 MONTHLY',
'congressional_district': 'CONGRESSIONAL DISTRICT 000000001 V1 MONTHLY',
'business_types_codes': ['2X', 'MF'],
'business_types': ['For Profit Organization', 'Manufacturer of Goods'],
'dba_name': 'DBA NAME 000000001 V1 MONTHLY',
'ultimate_parent_uei': 'D4',
'ultimate_parent_unique_ide': '000000004',
'ultimate_parent_legal_enti': 'ULTIMATE PARENT LEGAL BUSINESS NAME 000000004 V1 MONTHLY',
'historic': False
},
# Pulled active monthly record, updated as sam_extract = 2, don't pull in dup delete record
'000000002': {
'uei': 'B2',
'awardee_or_recipient_uniqu': '000000002',
'registration_date': '2000-02-01',
'activation_date': '2000-02-02',
'expiration_date': '2000-02-25',
'last_sam_mod_date': '2000-02-15',
'deactivation_date': None,
'legal_business_name': 'LEGAL BUSINESS NAME B2 V2 DAILY',
'address_line_1': 'ADDRESS LINE 1 B2 V2 DAILY',
'address_line_2': 'ADDRESS LINE 2 B2 V2 DAILY',
'city': 'CITY B2 V2 DAILY',
'state': 'ST B2 V2 DAILY',
'zip': 'ZIP B2 V2 DAILY',
'zip4': 'ZIP4 B2 V2 DAILY',
'country_code': 'COUNTRY B2 V2 DAILY',
'congressional_district': 'CONGRESSIONAL DISTRICT B2 V2 DAILY',
'business_types_codes': ['2X', 'MF'],
'business_types': ['For Profit Organization', 'Manufacturer of Goods'],
'dba_name': 'DBA NAME B2 V2 DAILY',
'ultimate_parent_uei': 'E5',
'ultimate_parent_unique_ide': '000000005',
'ultimate_parent_legal_enti': 'ULTIMATE PARENT LEGAL BUSINESS NAME E5 V2 DAILY',
'historic': False
},
# Pulled active monthly record, updated as sam_extract = 3
'000000003': {
'uei': 'C3',
'awardee_or_recipient_uniqu': '000000003',
'registration_date': '2000-03-01',
'activation_date': '2000-03-02',
'expiration_date': '2000-03-25',
'last_sam_mod_date': '2000-03-15',
'deactivation_date': None,
'legal_business_name': 'LEGAL BUSINESS NAME C3 V2 DAILY',
'address_line_1': 'ADDRESS LINE 1 C3 V2 DAILY',
'address_line_2': 'ADDRESS LINE 2 C3 V2 DAILY',
'city': 'CITY C3 V2 DAILY',
'state': 'ST C3 V2 DAILY',
'zip': 'ZIP C3 V2 DAILY',
'zip4': 'ZIP4 C3 V2 DAILY',
'country_code': 'COUNTRY C3 V2 DAILY',
'congressional_district': 'CONGRESSIONAL DISTRICT C3 V2 DAILY',
'business_types_codes': ['2X', 'MF'],
'business_types': ['For Profit Organization', 'Manufacturer of Goods'],
'dba_name': 'DBA NAME C3 V2 DAILY',
'ultimate_parent_uei': 'F6',
'ultimate_parent_unique_ide': '000000006',
'ultimate_parent_legal_enti': 'ULTIMATE PARENT LEGAL BUSINESS NAME F6 V2 DAILY',
'historic': False
},
# Pulled active daily V1 record, updated in daily V2 record
'000000004': {
'uei': 'D4',
'awardee_or_recipient_uniqu': '000000004',
'registration_date': '2000-03-01',
'activation_date': '2000-03-02',
'expiration_date': '2000-03-25',
'last_sam_mod_date': '2000-03-15',
'deactivation_date': None,
'legal_business_name': 'LEGAL BUSINESS NAME D4 V2 DAILY',
'address_line_1': 'ADDRESS LINE 1 D4 V2 DAILY',
'address_line_2': 'ADDRESS LINE 2 D4 V2 DAILY',
'city': 'CITY D4 V2 DAILY',
'state': 'ST D4 V2 DAILY',
'zip': 'ZIP D4 V2 DAILY',
'zip4': 'ZIP4 D4 V2 DAILY',
'country_code': 'COUNTRY D4 V2 DAILY',
'congressional_district': 'CONGRESSIONAL DISTRICT D4 V2 DAILY',
'business_types_codes': ['2X', 'MF'],
'business_types': ['For Profit Organization', 'Manufacturer of Goods'],
'dba_name': 'DBA NAME D4 V2 DAILY',
'ultimate_parent_uei': 'G7',
'ultimate_parent_unique_ide': '000000007',
'ultimate_parent_legal_enti': 'ULTIMATE PARENT LEGAL BUSINESS NAME G7 V2 DAILY',
'historic': False
},
# Pulled active daily V1 record, not updated in V2
'000000005': {
'uei': 'E5',
'awardee_or_recipient_uniqu': '000000005',
'registration_date': '2000-02-01',
'activation_date': '2000-02-02',
'expiration_date': '2000-02-25',
'last_sam_mod_date': '2000-02-15',
'deactivation_date': None,
'legal_business_name': 'LEGAL BUSINESS NAME 000000005 V1 DAILY',
'address_line_1': 'ADDRESS LINE 1 000000005 V1 DAILY',
'address_line_2': 'ADDRESS LINE 2 000000005 V1 DAILY',
'city': 'CITY 000000005 V1 DAILY',
'state': 'ST 000000005 V1 DAILY',
'zip': 'ZIP 000000005 V1 DAILY',
'zip4': 'ZIP4 000000005 V1 DAILY',
'country_code': 'COUNTRY 000000005 V1 DAILY',
'congressional_district': 'CONGRESSIONAL DISTRICT 000000005 V1 DAILY',
'business_types_codes': ['2X', 'MF'],
'business_types': ['For Profit Organization', 'Manufacturer of Goods'],
'dba_name': 'DBA NAME 000000005 V1 DAILY',
'ultimate_parent_uei': None,
'ultimate_parent_unique_ide': '000000007',
'ultimate_parent_legal_enti': 'ULTIMATE PARENT LEGAL BUSINESS NAME G7 V2 DAILY', # via missing parent names
'historic': False
}
}
# Ensure duplicates are covered
expected_duns_count = 5
duns_count = sess.query(DUNS).count()
assert duns_count == expected_duns_count
results = {}
for duns_obj in sess.query(DUNS).all():
results[duns_obj.awardee_or_recipient_uniqu] = {
'uei': duns_obj.uei,
'awardee_or_recipient_uniqu': duns_obj.awardee_or_recipient_uniqu,
'registration_date': str(duns_obj.registration_date) if duns_obj.registration_date else None,
'activation_date': str(duns_obj.activation_date) if duns_obj.activation_date else None,
'expiration_date': str(duns_obj.expiration_date) if duns_obj.expiration_date else None,
'last_sam_mod_date': str(duns_obj.last_sam_mod_date) if duns_obj.last_sam_mod_date else None,
'deactivation_date': str(duns_obj.deactivation_date) if duns_obj.deactivation_date else None,
'legal_business_name': duns_obj.legal_business_name,
'address_line_1': duns_obj.address_line_1,
'address_line_2': duns_obj.address_line_2,
'city': duns_obj.city,
'state': duns_obj.state,
'zip': duns_obj.zip,
'zip4': duns_obj.zip4,
'country_code': duns_obj.country_code,
'congressional_district': duns_obj.congressional_district,
'business_types_codes': duns_obj.business_types_codes,
'business_types': duns_obj.business_types,
'dba_name': duns_obj.dba_name,
'ultimate_parent_uei': duns_obj.ultimate_parent_uei,
'ultimate_parent_unique_ide': duns_obj.ultimate_parent_unique_ide,
'ultimate_parent_legal_enti': duns_obj.ultimate_parent_legal_enti,
'historic': duns_obj.historic
}
assert results == expected_results
def test_load_exec_comp(database, monkeypatch):
""" Test a local run load exec_comp with the test files """
monkeypatch.setattr('dataactcore.utils.duns.get_duns_props_from_sam', mock_get_duns_props_from_sam)
sess = database.session
duns_dir = os.path.join(CONFIG_BROKER['path'], 'tests', 'unit', 'data', 'fake_sam_files', 'duns')
exec_comp_dir = os.path.join(CONFIG_BROKER['path'], 'tests', 'unit', 'data', 'fake_sam_files', 'exec_comp')
load_duns_exec_comp.load_from_sam('DUNS', sess, True, duns_dir)
load_duns_exec_comp.load_from_sam('Executive Compensation', sess, True, exec_comp_dir, None)
monthly_last_exec_date = datetime.date(2017, 9, 30)
first_daily_exec_date = datetime.date(2019, 3, 29)
last_daily_exec_date = datetime.date(2019, 3, 30)
expected_results = {
# processed in the monthly, not updated as sam_extract = 1
'000000001': {
'awardee_or_recipient_uniqu': '000000001',
'high_comp_officer1_full_na': 'Terence Test 1',
'high_comp_officer1_amount': '11952013',
'high_comp_officer2_full_na': 'Aaron Test 1',
'high_comp_officer2_amount': '41161',
'high_comp_officer3_full_na': 'Jason Test 1',
'high_comp_officer3_amount': '286963',
'high_comp_officer4_full_na': 'Michael Test 1',
'high_comp_officer4_amount': '129337',
'high_comp_officer5_full_na': 'Mark Test 1',
'high_comp_officer5_amount': '1248877',
'last_exec_comp_mod_date': monthly_last_exec_date
},
# processed in the monthly, processed only in first daily as sam_extract = 2
'000000002': {
'awardee_or_recipient_uniqu': '000000002',
'high_comp_officer1_full_na': 'Terence Test Updated 1',
'high_comp_officer1_amount': '21952013',
'high_comp_officer2_full_na': 'Aaron Test Updated 1',
'high_comp_officer2_amount': '51161',
'high_comp_officer3_full_na': 'Jason Test Updated 1',
'high_comp_officer3_amount': '386963',
'high_comp_officer4_full_na': 'Michael Test Updated 1',
'high_comp_officer4_amount': '329337',
'high_comp_officer5_full_na': 'Mark Test Updated 1',
'high_comp_officer5_amount': '3248877',
'last_exec_comp_mod_date': first_daily_exec_date
},
# processed in the monthly, processed in both dailies as sam_extract = 3
'000000003': {
'awardee_or_recipient_uniqu': '000000003',
'high_comp_officer1_full_na': 'Terence Test Updated 2',
'high_comp_officer1_amount': '21952013',
'high_comp_officer2_full_na': 'Aaron Test Updated 2',
'high_comp_officer2_amount': '51161',
'high_comp_officer3_full_na': 'Jason Test Updated 2',
'high_comp_officer3_amount': '386963',
'high_comp_officer4_full_na': 'Michael Test Updated 2',
'high_comp_officer4_amount': '329337',
'high_comp_officer5_full_na': 'Mark Test Updated 2',
'high_comp_officer5_amount': '3248877',
'last_exec_comp_mod_date': last_daily_exec_date
},
# processed in the monthly, never updated since
'000000004': {
'awardee_or_recipient_uniqu': '000000004',
'high_comp_officer1_full_na': 'Terence Test 2',
'high_comp_officer1_amount': '11952013',
'high_comp_officer2_full_na': 'Aaron Test 2',
'high_comp_officer2_amount': '41161',
'high_comp_officer3_full_na': 'Jason Test 2',
'high_comp_officer3_amount': '286963',
'high_comp_officer4_full_na': 'Michael Test 2',
'high_comp_officer4_amount': '129337',
'high_comp_officer5_full_na': 'Mark Test 2',
'high_comp_officer5_amount': '1248877',
'last_exec_comp_mod_date': monthly_last_exec_date
},
# not included in any of the exec comp but listed in duns
'000000005': {
'awardee_or_recipient_uniqu': '000000005',
'high_comp_officer1_full_na': None,
'high_comp_officer1_amount': None,
'high_comp_officer2_full_na': None,
'high_comp_officer2_amount': None,
'high_comp_officer3_full_na': None,
'high_comp_officer3_amount': None,
'high_comp_officer4_full_na': None,
'high_comp_officer4_amount': None,
'high_comp_officer5_full_na': None,
'high_comp_officer5_amount': None,
'last_exec_comp_mod_date': None
}
}
results = {}
for duns_obj in sess.query(DUNS).all():
results[duns_obj.awardee_or_recipient_uniqu] = {
'awardee_or_recipient_uniqu': duns_obj.awardee_or_recipient_uniqu,
'high_comp_officer1_full_na': duns_obj.high_comp_officer1_full_na,
'high_comp_officer1_amount': duns_obj.high_comp_officer1_amount,
'high_comp_officer2_full_na': duns_obj.high_comp_officer2_full_na,
'high_comp_officer2_amount': duns_obj.high_comp_officer2_amount,
'high_comp_officer3_full_na': duns_obj.high_comp_officer3_full_na,
'high_comp_officer3_amount': duns_obj.high_comp_officer3_amount,
'high_comp_officer4_full_na': duns_obj.high_comp_officer4_full_na,
'high_comp_officer4_amount': duns_obj.high_comp_officer4_amount,
'high_comp_officer5_full_na': duns_obj.high_comp_officer5_full_na,
'high_comp_officer5_amount': duns_obj.high_comp_officer5_amount,
'last_exec_comp_mod_date': duns_obj.last_exec_comp_mod_date
}
assert results == expected_results
| cc0-1.0 |
hainm/statsmodels | statsmodels/sandbox/tsa/example_arma.py | 27 | 11572 | '''trying to verify theoretical acf of arma
explicit functions for autocovariance functions of ARMA(1,1), MA(1), MA(2)
plus 3 functions from nitime.utils
'''
from __future__ import print_function
from statsmodels.compat.python import range
import numpy as np
from numpy.testing import assert_array_almost_equal
import matplotlib.mlab as mlab
from statsmodels.tsa.arima_process import arma_generate_sample, arma_impulse_response
from statsmodels.tsa.arima_process import arma_acovf, arma_acf, ARIMA
#from movstat import acf, acovf
#from statsmodels.sandbox.tsa import acf, acovf, pacf
from statsmodels.tsa.stattools import acf, acovf, pacf
ar = [1., -0.6]
#ar = [1., 0.]
ma = [1., 0.4]
#ma = [1., 0.4, 0.6]
#ma = [1., 0.]
mod = ''#'ma2'
x = arma_generate_sample(ar, ma, 5000)
x_acf = acf(x)[:10]
x_ir = arma_impulse_response(ar, ma)
#print x_acf[:10]
#print x_ir[:10]
#irc2 = np.correlate(x_ir,x_ir,'full')[len(x_ir)-1:]
#print irc2[:10]
#print irc2[:10]/irc2[0]
#print irc2[:10-1] / irc2[1:10]
#print x_acf[:10-1] / x_acf[1:10]
# detrend helper from matplotlib.mlab
def detrend(x, key=None):
if key is None or key=='constant':
return detrend_mean(x)
elif key=='linear':
return detrend_linear(x)
def demean(x, axis=0):
"Return x minus its mean along the specified axis"
x = np.asarray(x)
if axis:
ind = [slice(None)] * axis
ind.append(np.newaxis)
return x - x.mean(axis)[ind]
return x - x.mean(axis)
def detrend_mean(x):
"Return x minus the mean(x)"
return x - x.mean()
def detrend_none(x):
"Return x: no detrending"
return x
def detrend_linear(y):
"Return y minus best fit line; 'linear' detrending "
# This is faster than an algorithm based on linalg.lstsq.
x = np.arange(len(y), dtype=np.float_)
C = np.cov(x, y, bias=1)
b = C[0,1]/C[0,0]
a = y.mean() - b*x.mean()
return y - (b*x + a)
def acovf_explicit(ar, ma, nobs):
    '''compute the autocovariance explicitly from the MA (impulse response) representation
'''
ir = arma_impulse_response(ar, ma)
acovfexpl = [np.dot(ir[:nobs-t], ir[t:nobs]) for t in range(10)]
return acovfexpl
def acovf_arma11(ar, ma):
# ARMA(1,1)
# Florens et al page 278
# wrong result ?
# new calculation bigJudge p 311, now the same
a = -ar[1]
b = ma[1]
#rho = [1.]
#rho.append((1-a*b)*(a-b)/(1.+a**2-2*a*b))
rho = [(1.+b**2+2*a*b)/(1.-a**2)]
rho.append((1+a*b)*(a+b)/(1.-a**2))
for _ in range(8):
last = rho[-1]
rho.append(a*last)
return np.array(rho)
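# Added note (restating what acovf_arma11 computes): for an ARMA(1,1) process
# x_t = a*x_{t-1} + e_t + b*e_{t-1} with unit innovation variance,
#     gamma_0 = (1 + b**2 + 2*a*b) / (1 - a**2)
#     gamma_1 = (1 + a*b) * (a + b) / (1 - a**2)
#     gamma_k = a * gamma_{k-1}    for k >= 2
# with a = -ar[1] and b = ma[1] in the sign convention used in this script.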
# print acf11[:10]
# print acf11[:10] /acf11[0]
def acovf_ma2(ma):
# MA(2)
# from Greene p616 (with typo), Florens p280
b1 = -ma[1]
b2 = -ma[2]
rho = np.zeros(10)
rho[0] = (1 + b1**2 + b2**2)
rho[1] = (-b1 + b1*b2)
rho[2] = -b2
return rho
# rho2 = rho/rho[0]
# print rho2
# print irc2[:10]/irc2[0]
def acovf_ma1(ma):
# MA(1)
# from Greene p616 (with typo), Florens p280
b = -ma[1]
rho = np.zeros(10)
rho[0] = (1 + b**2)
rho[1] = -b
return rho
# rho2 = rho/rho[0]
# print rho2
# print irc2[:10]/irc2[0]
ar1 = [1., -0.8]
ar0 = [1., 0.]
ma1 = [1., 0.4]
ma2 = [1., 0.4, 0.6]
ma0 = [1., 0.]
comparefn = dict(
[('ma1', acovf_ma1),
('ma2', acovf_ma2),
('arma11', acovf_arma11),
('ar1', acovf_arma11)])
cases = [('ma1', (ar0, ma1)),
('ma2', (ar0, ma2)),
('arma11', (ar1, ma1)),
('ar1', (ar1, ma0))]
for c, args in cases:
ar, ma = args
print('')
print(c, ar, ma)
myacovf = arma_acovf(ar, ma, nobs=10)
myacf = arma_acf(ar, ma, nobs=10)
if c[:2]=='ma':
othacovf = comparefn[c](ma)
else:
othacovf = comparefn[c](ar, ma)
print(myacovf[:5])
print(othacovf[:5])
#something broke again,
#for high persistence case eg ar=0.99, nobs of IR has to be large
#made changes to arma_acovf
assert_array_almost_equal(myacovf, othacovf,10)
assert_array_almost_equal(myacf, othacovf/othacovf[0],10)
#from nitime.utils
def ar_generator(N=512, sigma=1.):
# this generates a signal u(n) = a1*u(n-1) + a2*u(n-2) + ... + v(n)
# where v(n) is a stationary stochastic process with zero mean
# and variance = sigma
# this sequence is shown to be estimated well by an order 8 AR system
taps = np.array([2.7607, -3.8106, 2.6535, -0.9238])
v = np.random.normal(size=N, scale=sigma**0.5)
u = np.zeros(N)
P = len(taps)
for l in range(P):
u[l] = v[l] + np.dot(u[:l][::-1], taps[:l])
for l in range(P,N):
u[l] = v[l] + np.dot(u[l-P:l][::-1], taps)
return u, v, taps
#JP: small differences to using np.correlate, because assumes mean(s)=0
# denominator is N, not N-k, biased estimator
# misnomer: (biased) autocovariance not autocorrelation
#from nitime.utils
def autocorr(s, axis=-1):
"""Returns the autocorrelation of signal s at all lags. Adheres to the
definition r(k) = E{s(n)s*(n-k)} where E{} is the expectation operator.
"""
N = s.shape[axis]
S = np.fft.fft(s, n=2*N-1, axis=axis)
sxx = np.fft.ifft(S*S.conjugate(), axis=axis).real[:N]
return sxx/N
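# Added illustration (not part of the original nitime helpers): a direct
# np.correlate version of the same biased estimator, making the comments above
# concrete -- no demeaning, and the denominator is N at every lag (no N-k
# correction).
def autocorr_direct(s):
    """Biased autocovariance via np.correlate; matches autocorr() for real 1d input."""
    s = np.asarray(s)
    N = len(s)
    # keep the non-negative lags of the full linear correlation and divide by N
    return np.correlate(s, s, mode='full')[N - 1:] / N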
#JP: with valid this returns a single value, if x and y have same length
# e.g. norm_corr(x, x)
# using std subtracts mean, but correlate doesn't, requires means are exactly 0
# biased, no n-k correction for laglength
#from nitime.utils
def norm_corr(x,y,mode = 'valid'):
"""Returns the correlation between two ndarrays, by calling np.correlate in
'same' mode and normalizing the result by the std of the arrays and by
their lengths. This results in a correlation = 1 for an auto-correlation"""
return ( np.correlate(x,y,mode) /
(np.std(x)*np.std(y)*(x.shape[-1])) )
# from matplotlib axes.py
# note: self is axis
def pltacorr(self, x, **kwargs):
"""
call signature::
acorr(x, normed=True, detrend=detrend_none, usevlines=True,
maxlags=10, **kwargs)
Plot the autocorrelation of *x*. If *normed* = *True*,
normalize the data by the autocorrelation at 0-th lag. *x* is
detrended by the *detrend* callable (default no normalization).
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length 2*maxlags+1 lag vector
- *c* is the 2*maxlags+1 auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :meth:`plot`
The default *linestyle* is None and the default *marker* is
``'o'``, though these can be overridden with keyword args.
The cross correlation is performed with
:func:`numpy.correlate` with *mode* = 2.
If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines`
rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw
vertical lines from the origin to the acorr. Otherwise, the
plot style is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
*maxlags* is a positive integer detailing the number of lags
to show. The default value of *None* will return all
:math:`2 \mathrm{len}(x) - 1` lags.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where
- *linecol* is the
:class:`~matplotlib.collections.LineCollection`
- *b* is the *x*-axis.
.. seealso::
:meth:`~matplotlib.axes.Axes.plot` or
:meth:`~matplotlib.axes.Axes.vlines`
For documentation on valid kwargs.
**Example:**
:func:`~matplotlib.pyplot.xcorr` above, and
:func:`~matplotlib.pyplot.acorr` below.
**Example:**
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
return self.xcorr(x, x, **kwargs)
def pltxcorr(self, x, y, normed=True, detrend=detrend_none,
usevlines=True, maxlags=10, **kwargs):
"""
call signature::
def xcorr(self, x, y, normed=True, detrend=detrend_none,
usevlines=True, maxlags=10, **kwargs):
Plot the cross correlation between *x* and *y*. If *normed* =
*True*, normalize the data by the cross correlation at 0-th
lag. *x* and y are detrended by the *detrend* callable
(default no normalization). *x* and *y* must be equal length.
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length ``2*maxlags+1`` lag vector
- *c* is the ``2*maxlags+1`` auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :func:`~matplotlib.pyplot.plot`.
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args. The
cross correlation is performed with :func:`numpy.correlate`
with *mode* = 2.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where *linecol* is the
:class:`matplotlib.collections.LineCollection` instance and
*b* is the *x*-axis.
*maxlags* is a positive integer detailing the number of lags to show.
The default value of *None* will return all ``(2*len(x)-1)`` lags.
**Example:**
:func:`~matplotlib.pyplot.xcorr` above, and
:func:`~matplotlib.pyplot.acorr` below.
**Example:**
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
Nx = len(x)
if Nx!=len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
c = np.correlate(x, y, mode=2)
if normed: c/= np.sqrt(np.dot(x,x) * np.dot(y,y))
if maxlags is None: maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
        raise ValueError('maxlags must be None or strictly '
'positive < %d'%Nx)
lags = np.arange(-maxlags,maxlags+1)
c = c[Nx-1-maxlags:Nx+maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
d = self.plot(lags, c, **kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
return lags, c, a, b
arrvs = ar_generator()
##arma = ARIMA()
##res = arma.fit(arrvs[0], 4, 0)
arma = ARIMA(arrvs[0])
res = arma.fit((4,0, 0))
print(res[0])
acf1 = acf(arrvs[0])
acovf1b = acovf(arrvs[0], unbiased=False)
acf2 = autocorr(arrvs[0])
acf2m = autocorr(arrvs[0]-arrvs[0].mean())
print(acf1[:10])
print(acovf1b[:10])
print(acf2[:10])
print(acf2m[:10])
x = arma_generate_sample([1.0, -0.8], [1.0], 500)
print(acf(x)[:20])
import statsmodels.api as sm
print(sm.regression.yule_walker(x, 10))
import matplotlib.pyplot as plt
#ax = plt.axes()
plt.plot(x)
#plt.show()
plt.figure()
pltxcorr(plt,x,x)
plt.figure()
pltxcorr(plt,x,x, usevlines=False)
plt.figure()
#FIXME: plotacf was moved to graphics/tsaplots.py, and interface changed
plotacf(plt, acf1[:20], np.arange(len(acf1[:20])), usevlines=True)
plt.figure()
ax = plt.subplot(211)
plotacf(ax, acf1[:20], usevlines=True)
ax = plt.subplot(212)
plotacf(ax, acf1[:20], np.arange(len(acf1[:20])), usevlines=False)
#plt.show()
| bsd-3-clause |
MarkusHackspacher/unknown-horizons | development/combat_ai/diplomacy_graphs.py | 1 | 4116 | # ###################################################
# Copyright (C) 2008-2017 The Unknown Horizons Team
# [email protected]
# This file is part of Unknown Horizons.
#
# Unknown Horizons is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# ###################################################
"""
This is a balancing tool for diplomacy which makes setting diplomacy parameters (such as mid, root or peek) easier.
It requires matplotlib (along with pylab) library in order to plot functions.
Usage:
1. Run the script from UH root, i.e. python development/combat_ai/diplomacy_graphs.py
2. A graph should appear on screen displaying current functions for each of the settings (see parameter_sets below)
3. After you close the plot window, the next one should appear
4. Uncomment functions from parameter_sets you don't want to have displayed
"""
from __future__ import print_function
import sys
import pylab
sys.path.append(".")
sys.path.append("./horizons")
sys.path.append("./horizons/util")
try:
import run_uh
except ImportError as e:
print(e.message)
print("Please run from Unknown Horizons root dir")
sys.exit(1)
from run_uh import init_environment
init_environment(False)
import horizons.main
from horizons.ai.aiplayer.behavior.diplomacysettings import DiplomacySettings
from horizons.ai.aiplayer.behavior.behaviorcomponents import BehaviorDiplomatic
_move_f = BehaviorDiplomatic._move_f
_get_quadratic_function = BehaviorDiplomatic._get_quadratic_function
get_enemy_function = BehaviorDiplomatic.get_enemy_function
get_ally_function = BehaviorDiplomatic.get_ally_function
get_neutral_function = BehaviorDiplomatic.get_neutral_function
def diplomacy_graph():
header = "Diplomacy function"
x_label = "relationship_score"
y_label = "probability"
# define functions here to plot them.
# Second parameter is color
upper_boundary = DiplomacySettings.upper_boundary
parameter_sets = (
("BehaviorGood.allied_player", DiplomacySettings.Good.parameters_allied),
("BehaviorGood.neutral_player", DiplomacySettings.Good.parameters_neutral),
("BehaviorGood.hostile_player", DiplomacySettings.Good.parameters_hostile),
("BehaviorNeutral.allied_player", DiplomacySettings.Neutral.parameters_hostile),
("BehaviorNeutral.neutral_player", DiplomacySettings.Neutral.parameters_neutral),
("BehaviorNeutral.hostile_player", DiplomacySettings.Neutral.parameters_hostile),
("BehaviorEvil.allied_player", DiplomacySettings.Evil.parameters_hostile),
("BehaviorEvil.neutral_player", DiplomacySettings.Evil.parameters_neutral),
("BehaviorEvil.hostile_player", DiplomacySettings.Evil.parameters_hostile),
)
for parameter_name, parameters in parameter_sets:
# always print upper boundary
x = [-10, 10]
y = [upper_boundary] * 2
pylab.plot(x, y, color='y', marker=None)
functions = []
if 'enemy' in parameters:
functions.append((get_enemy_function(**parameters['enemy']), 'r'))
if 'ally' in parameters:
functions.append((get_ally_function(**parameters['ally']), 'g'))
if 'neutral' in parameters:
functions.append((get_neutral_function(**parameters['neutral']), 'b'))
for f, c in functions:
gen = [(x / 10.0, f(x / 10.0)) for x in xrange(-100, 100)]
x = [item[0] for item in gen]
y = [item[1] for item in gen]
pylab.plot(x, y, color=c, marker=None)
pylab.xlabel(x_label)
pylab.ylabel(y_label)
pylab.title(parameter_name)
pylab.grid(True)
pylab.show()
if __name__ == "__main__":
diplomacy_graph()
| gpl-2.0 |
cggh/scikit-allel | docs/conf.py | 1 | 9899 | # -*- coding: utf-8 -*-
#
# scikit-allel documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 27 09:37:32 2015.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
from mock import Mock as MagicMock
import allel.version
import sphinx.environment
from docutils.utils import get_source_line
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['scipy', 'scipy.stats', 'scipy.spatial', 'scipy.linalg',
'scipy.spatial.distance', 'matplotlib', 'matplotlib.pyplot',
'matplotlib.image', 'ipython', 'numexpr', 'sklearn', 'sklearn.decomposition',
'h5py', 'rpy2', 'rpy2.robjects', 'rpy2.robjects.numpy2ri',
'rpy2.robjects.packages', 'sklearn.manifold', 'scipy.special', 'pandas',
'dask', 'dask.array']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# monkey-patch to avoid non-local URI warnings
# http://stackoverflow.com/questions/12772927/specifying-an-online-image-in-sphinx-restructuredtext-format
def _warn_node(self, msg, node):
if not msg.startswith('nonlocal image URI found:'):
self._warnfunc(msg, '%s:%s' % get_source_line(node))
sphinx.environment.BuildEnvironment.warn_node = _warn_node
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.autodoc',
'numpydoc',
'sphinx_issues',
]
issues_github_path = 'cggh/scikit-allel'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'scikit-allel'
copyright = u'2015, Alistair Miles'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(allel.version.version.split('.')[:3])
# The full version, including alpha/beta/rc tags.
release = allel.version.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'allel/opt']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-alleldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'scikit-allel.tex', u'scikit-allel Documentation',
u'Alistair Miles', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'scikit-allel', u'scikit-allel Documentation',
[u'Alistair Miles'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'scikit-allel', u'scikit-allel Documentation',
u'Alistair Miles', 'scikit-allel', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
# 'http://docs.python.org/': None,
# 'numpy': ('http://docs.scipy.org/doc/numpy/', None),
# 'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
# 'matplotlib': ('http://matplotlib.sourceforge.net/', None)
}
# autodoc config
autoclass_content = 'class'
# numpydoc config
numpydoc_show_class_members = False
numpydoc_show_inherited_class_members = False
numpydoc_class_members_toctree = False
| mit |
openhumanoids/pronto | motion_estimate/scripts/footstep_spoof.py | 2 | 2602 | #!/usr/bin/python
# TODO: check that the count is entirely monotonic and that there are no missing packets
import os,sys
import lcm
import time
from lcm import LCM
from math import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from threading import Thread
import threading
from drc_utils import *
home_dir =os.getenv("HOME")
#print home_dir
sys.path.append(home_dir + "/drc/software/build/lib/python2.7/site-packages")
sys.path.append(home_dir + "/drc/software/build/lib/python2.7/dist-packages")
from drc.deprecated_footstep_plan_t import deprecated_footstep_plan_t
from bot_core.pose_t import pose_t
########################################################################################
def timestamp_now (): return int (time.time () * 1000000)
class State:
def __init__(self):
self.last_utime =0
self.got_pose_bdi =False
self.got_pose_body =False
self.pose_bdi = BotTrans()
self.pose_body = BotTrans()
def on_pose_bdi(channel, data):
m = pose_t.decode(data)
if (state.got_pose_bdi == False): print "Received",channel
state.got_pose_bdi = True
state.pose_bdi = getBotCorePose3dAsBotTrans(m)
#temp = trans_invert( state.pose_body ) ;
#q = trans_apply_trans( state.pose_bdi,temp );
#q.print_out()
def on_pose_body(channel, data):
m = pose_t.decode(data)
if (state.got_pose_body == False): print "Received",channel
state.got_pose_body = True
state.pose_body = getBotCorePose3dAsBotTrans(m)
def on_deprecated_footstep_plan(channel, data):
if ( (state.got_pose_bdi == False) or (state.got_pose_body == False) ):
print "havent got POSE_BODY_ALT and POSE_BODY yet"
return
m = deprecated_footstep_plan_t.decode(data)
print m.utime,"Republishing", m.num_steps, "steps"
temp = trans_invert( state.pose_body ) ;
q = trans_apply_trans( state.pose_bdi,temp );
for i in range(0,m.num_steps):
p = getDrcPosition3dAsBotTrans(m.footstep_goals[i].pos)
temp = trans_invert( state.pose_body ) ;
body_to_step = trans_apply_trans( p,temp );
#body_to_step.print_out()
q2 = trans_apply_trans( body_to_step,state.pose_bdi )
m.footstep_goals[i].pos = getBotTransAsDrcPosition3d(q2)
lc.publish("CANDIDATE_BDI_FOOTSTEP_PLAN",m.encode())
####################################################################
lc = lcm.LCM()
print "started"
state = State()
sub1 = lc.subscribe("POSE_BODY_ALT", on_pose_bdi)
sub2 = lc.subscribe("POSE_BODY", on_pose_body)
sub3 = lc.subscribe("CANDIDATE_BDI_FOOTSTEP_PLAN_MIT_FRAME", on_deprecated_footstep_plan)
while True:
lc.handle()
| lgpl-2.1 |
waynenilsen/statsmodels | statsmodels/sandbox/distributions/mv_measures.py | 33 | 6257 | '''using multivariate dependence and divergence measures
The standard correlation coefficient measures only linear dependence between
random variables.
Kendall's tau measures any monotonic relationship, including non-linear ones.
mutual information measures any kind of dependence, but does not distinguish
between positive and negative relationships.
mutualinfo_kde and mutualinfo_binned follow Khan et al. 2007
Shiraj Khan, Sharba Bandyopadhyay, Auroop R. Ganguly, Sunil Saigal,
David J. Erickson, III, Vladimir Protopopescu, and George Ostrouchov,
Relative performance of mutual information estimation methods for
quantifying the dependence among short and noisy data,
Phys. Rev. E 76, 026209 (2007)
http://pre.aps.org/abstract/PRE/v76/i2/e026209
'''
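# Note added for clarity (not in the original module): the ``normed=True``
# branches below rescale mutual information to a correlation-like [0, 1] scale,
#
#     mi_normed = sqrt(1 - exp(-2 * mi))
#
# For a bivariate normal with correlation rho, MI = -0.5 * log(1 - rho**2), so
# this transform recovers |rho| exactly.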
import numpy as np
from scipy import stats
from scipy.stats import gaussian_kde
import statsmodels.sandbox.infotheo as infotheo
def mutualinfo_kde(y, x, normed=True):
'''mutual information of two random variables estimated with kde
'''
nobs = len(x)
if not len(y) == nobs:
raise ValueError('both data arrays need to have the same size')
x = np.asarray(x, float)
y = np.asarray(y, float)
yx = np.vstack((y,x))
kde_x = gaussian_kde(x)(x)
kde_y = gaussian_kde(y)(y)
kde_yx = gaussian_kde(yx)(yx)
mi_obs = np.log(kde_yx) - np.log(kde_x) - np.log(kde_y)
mi = mi_obs.sum() / nobs
if normed:
mi_normed = np.sqrt(1. - np.exp(-2 * mi))
return mi_normed
else:
return mi
def mutualinfo_kde_2sample(y, x, normed=True):
    '''divergence between the distributions of two samples, estimated with kde
'''
nobs = len(x)
x = np.asarray(x, float)
y = np.asarray(y, float)
#yx = np.vstack((y,x))
kde_x = gaussian_kde(x.T)(x.T)
kde_y = gaussian_kde(y.T)(x.T)
#kde_yx = gaussian_kde(yx)(yx)
mi_obs = np.log(kde_x) - np.log(kde_y)
if len(mi_obs) != nobs:
raise ValueError("Wrong number of observations")
mi = mi_obs.mean()
if normed:
mi_normed = np.sqrt(1. - np.exp(-2 * mi))
return mi_normed
else:
return mi
def mutualinfo_binned(y, x, bins, normed=True):
    '''mutual information of two random variables estimated from binned data (2d histogram)
Notes
-----
bins='auto' selects the number of bins so that approximately 5 observations
are expected to be in each bin under the assumption of independence. This
    follows roughly the description in Khan et al. 2007
'''
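    # Added worked example of the 'auto' rule (illustration only): with
    # nobs = 200, qbin_sqr = sqrt(5/200) ~ 0.158 and linspace(0, 1, 1/0.158)
    # gives about 6 quantile edges per axis, i.e. roughly 5x5 = 25 cells in 2d
    # and 200/25 = 8 expected observations per cell under independence
    # (the "approximately 5" rule is only rough).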
nobs = len(x)
if not len(y) == nobs:
raise ValueError('both data arrays need to have the same size')
x = np.asarray(x, float)
y = np.asarray(y, float)
#yx = np.vstack((y,x))
## fyx, binsy, binsx = np.histogram2d(y, x, bins=bins)
## fx, binsx_ = np.histogram(x, bins=binsx)
## fy, binsy_ = np.histogram(y, bins=binsy)
if bins == 'auto':
ys = np.sort(y)
xs = np.sort(x)
#quantiles = np.array([0,0.25, 0.4, 0.6, 0.75, 1])
qbin_sqr = np.sqrt(5./nobs)
quantiles = np.linspace(0, 1, 1./qbin_sqr)
quantile_index = ((nobs-1)*quantiles).astype(int)
#move edges so that they don't coincide with an observation
shift = 1e-6 + np.ones(quantiles.shape)
shift[0] -= 2*1e-6
binsy = ys[quantile_index] + shift
binsx = xs[quantile_index] + shift
elif np.size(bins) == 1:
binsy = bins
binsx = bins
elif (len(bins) == 2):
binsy, binsx = bins
## if np.size(bins[0]) == 1:
## binsx = bins[0]
## if np.size(bins[1]) == 1:
## binsx = bins[1]
fx, binsx = np.histogram(x, bins=binsx)
fy, binsy = np.histogram(y, bins=binsy)
fyx, binsy, binsx = np.histogram2d(y, x, bins=(binsy, binsx))
pyx = fyx * 1. / nobs
px = fx * 1. / nobs
py = fy * 1. / nobs
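    # discrete mutual information: I(Y;X) = sum_{y,x} p(y,x) * log(p(y,x) / (p(y)*p(x)))
    # computed in nats; the 1e-10 below keeps the log finite for empty cells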
mi_obs = pyx * (np.log(pyx+1e-10) - np.log(py)[:,None] - np.log(px))
mi = mi_obs.sum()
if normed:
mi_normed = np.sqrt(1. - np.exp(-2 * mi))
return mi_normed, (pyx, py, px, binsy, binsx), mi_obs
else:
return mi
if __name__ == '__main__':
import statsmodels.api as sm
funtype = ['linear', 'quadratic'][1]
nobs = 200
sig = 2#5.
#x = np.linspace(-3, 3, nobs) + np.random.randn(nobs)
x = np.sort(3*np.random.randn(nobs))
exog = sm.add_constant(x, prepend=True)
#y = 0 + np.log(1+x**2) + sig * np.random.randn(nobs)
if funtype == 'quadratic':
y = 0 + x**2 + sig * np.random.randn(nobs)
if funtype == 'linear':
y = 0 + x + sig * np.random.randn(nobs)
print('correlation')
print(np.corrcoef(y,x)[0, 1])
print('pearsonr', stats.pearsonr(y,x))
print('spearmanr', stats.spearmanr(y,x))
print('kendalltau', stats.kendalltau(y,x))
pxy, binsx, binsy = np.histogram2d(x,y, bins=5)
px, binsx_ = np.histogram(x, bins=binsx)
py, binsy_ = np.histogram(y, bins=binsy)
print('mutualinfo', infotheo.mutualinfo(px*1./nobs, py*1./nobs,
1e-15+pxy*1./nobs, logbase=np.e))
print('mutualinfo_kde normed', mutualinfo_kde(y,x))
print('mutualinfo_kde ', mutualinfo_kde(y,x, normed=False))
mi_normed, (pyx2, py2, px2, binsy2, binsx2), mi_obs = \
mutualinfo_binned(y, x, 5, normed=True)
print('mutualinfo_binned normed', mi_normed)
print('mutualinfo_binned ', mi_obs.sum())
mi_normed, (pyx2, py2, px2, binsy2, binsx2), mi_obs = \
mutualinfo_binned(y, x, 'auto', normed=True)
print('auto')
print('mutualinfo_binned normed', mi_normed)
print('mutualinfo_binned ', mi_obs.sum())
ys = np.sort(y)
xs = np.sort(x)
by = ys[((nobs-1)*np.array([0, 0.25, 0.4, 0.6, 0.75, 1])).astype(int)]
bx = xs[((nobs-1)*np.array([0, 0.25, 0.4, 0.6, 0.75, 1])).astype(int)]
mi_normed, (pyx2, py2, px2, binsy2, binsx2), mi_obs = \
mutualinfo_binned(y, x, (by,bx), normed=True)
print('quantiles')
print('mutualinfo_binned normed', mi_normed)
print('mutualinfo_binned ', mi_obs.sum())
doplot = 1#False
if doplot:
import matplotlib.pyplot as plt
plt.plot(x, y, 'o')
olsres = sm.OLS(y, exog).fit()
plt.plot(x, olsres.fittedvalues)
| bsd-3-clause |
jason-neal/companion_simulations | old_simulations/lmfitting.py | 1 | 5625 | """Try fitting with lmfit."""
import os
import lmfit
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits
from lmfit import Parameters, minimize
from spectrum_overload import Spectrum
import simulators
from obsolete.models.alpha_model import double_shifted_alpha_model
from mingle.utilities.phoenix_utils import local_normalization, spec_local_norm
from old_simulations.Planet_spectral_simulations import simple_normalization
def alpha_model_residual(params, x, data, eps_data, host_models, companion_models):
"""Residaul function to use with lmfit Minimizer."""
alpha = params['alpha'].value
rv1 = params['rv1'].value
rv2 = params['rv2'].value
host_index = params['host_index'].value
companion_index = params['companion_index'].value
limits = [params['min_limit'].value, params['max_limit'].value]
host = host_models[host_index]
companion = companion_models[companion_index]
print(host_models)
print("x", x, "len x", len(x))
print("alpha", alpha)
print("rv1, rv2 ", rv1, rv2)
print("host", host.xaxis, host.flux)
print("companion", companion.xaxis, companion.flux)
print(len(host), len(companion))
print(limits)
model = double_shifted_alpha_model(alpha, rv1, rv2, host, companion, limits, new_x=x)
print(data)
print(model)
print(model.xaxis)
print(model.flux)
print(eps_data)
return (data - model.flux) / eps_data
host_temp = 5300
comp_temp = 2300
# Load in some phoenix data and make a simulation
base = simulators.starfish_grid["raw_path"]
phoenix_wl = fits.getdata(os.path.join(base, "WAVE_PHOENIX-ACES-AGSS-COND-2011.fits")) / 10
host_phoenix = os.path.join(base, ("Z-0.0/lte{:05d}-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits").format(host_temp))
comp_phoenix = os.path.join(base, ("Z-0.0/lte{:05d}-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits").format(comp_temp))
unnorm_host_spec = Spectrum(flux=fits.getdata(host_phoenix), xaxis=phoenix_wl)
unnorm_comp_spec = Spectrum(flux=fits.getdata(comp_phoenix), xaxis=phoenix_wl)
min_wav = 2050
max_wav = 2250
unnorm_host_spec.wav_select(min_wav, max_wav)
unnorm_comp_spec.wav_select(min_wav, max_wav)
# local normalization
norm_host_flux = local_normalization(unnorm_host_spec.xaxis, unnorm_host_spec.flux, method="exponential", plot=False)
norm_comp_flux = local_normalization(unnorm_comp_spec.xaxis, unnorm_comp_spec.flux, method="exponential", plot=False)
norm_host_spec = spec_local_norm(unnorm_host_spec, method="exponential")
norm_comp_spec = spec_local_norm(unnorm_comp_spec, method="exponential")
double_norm_host = spec_local_norm(norm_host_spec, method="quadratic")
host_spec = simple_normalization(unnorm_host_spec)
comp_spec = simple_normalization(unnorm_comp_spec)
plot = 0
if plot:
plt.subplot(311)
plt.plot(unnorm_host_spec.xaxis, unnorm_host_spec.flux, label="Host")
plt.plot(unnorm_comp_spec.xaxis, unnorm_comp_spec.flux, label="Comp")
plt.title("Unnormalized")
plt.legend()
plt.subplot(312)
plt.plot(norm_comp_spec.xaxis, norm_comp_spec.flux, label="Comp")
plt.plot(norm_host_spec.xaxis, norm_host_spec.flux, label="Host")
plt.title("Local normalization.")
plt.legend()
plt.subplot(313)
plt.plot(comp_spec.xaxis, comp_spec.flux, label="Comp")
plt.plot(host_spec.xaxis, host_spec.flux, label="Host")
plt.title("Simple normalization")
plt.legend()
plt.show()
host_models = [norm_host_spec]
comp_models = [norm_comp_spec]
def vel_vect(wav, ref_wav=None):
"""Convert wavelength to velocity vector."""
if ref_wav is None:
ref_wav = np.median(wav)
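    # non-relativistic Doppler relation: v = c * (wav - ref_wav) / ref_wav with c = 299792.458 km/s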
v = (wav - ref_wav) * 299792.458 / ref_wav # km/s
return v
alpha_val = 0.1
rv1_val = 0
rv2_val = -50
host_val = norm_host_spec
companion_val = norm_comp_spec
limits_val = [2100, 2180]
data_spec = double_shifted_alpha_model(alpha_val, rv1_val, rv2_val, host_val, companion_val, limits_val)
if plot:
plt.plot(vel_vect(comp_spec.xaxis), comp_spec.flux, label="Comp")
plt.plot(vel_vect(host_val.xaxis), host_val.flux, label="Host")
plt.plot(vel_vect(data_spec.xaxis), data_spec.flux, label=" CRIRES Simulation")
plt.xlim(limits_val)
plt.legend()
plt.show()
data_spec.add_noise(snr=350)
params = Parameters()
params.add('alpha', value=0.4, min=0, max=0.5)
params.add('rv1', value=0., min=-100, max=100, brute_step=0.5, vary=False)
params.add('rv2', value=-10., min=-200, max=200, brute_step=0.5, vary=True)
params.add('host_index', value=0, vary=False, brute_step=1)
params.add('companion_index', value=0, vary=False, brute_step=1)
params.add('min_limit', value=2100, vary=False)
params.add('max_limit', value=2180, vary=False)
out = minimize(alpha_model_residual, params, args=(data_spec.xaxis, data_spec.flux,
np.ones_like(data_spec.flux), host_models, comp_models))
print(out)
out.params.pretty_print()
print(lmfit.report_fit(out))
fit_params = out.params
result = double_shifted_alpha_model(fit_params['alpha'].value, fit_params['rv1'].value,
fit_params['rv2'].value, host_models[fit_params['host_index'].value],
comp_models[fit_params['companion_index'].value], [fit_params['min_limit'].value,
fit_params['max_limit'].value])
plt.plot(data_spec.xaxis, data_spec.flux, label="Simulation")
plt.plot(phoenix_wl, )
plt.plot(result.xaxis, result.flux, label="Returned fit")
plt.legend()
plt.show()
# Need to try a DIFFERENT OPTIMIZER
| mit |
malin1993ml/h-store | graphs/eviction-overhead.py | 4 | 2079 | #!/usr/bin/env python
import os
import sys
import re
import logging
import fnmatch
import string
import argparse
import pylab
import numpy as np
import matplotlib.pyplot as plot
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import MaxNLocator
from pprint import pprint,pformat
from options import *
import graphutil
import datautil
## ==============================================
## LOGGING CONFIGURATION
## ==============================================
LOG = logging.getLogger(__name__)
LOG_handler = logging.StreamHandler()
LOG_formatter = logging.Formatter(
fmt='%(asctime)s [%(funcName)s:%(lineno)03d] %(levelname)-5s: %(message)s',
datefmt='%m-%d-%Y %H:%M:%S'
)
LOG_handler.setFormatter(LOG_formatter)
LOG.addHandler(LOG_handler)
LOG.setLevel(logging.INFO)
## ==============================================
## CONFIGURATION
## ==============================================
def computeEvictionStats(dataFile):
colMap, csvData = datautil.getCSVData(dataFile)
if len(csvData) == 0: return
allTimes = [ ]
allTuples = [ ]
allBlocks = [ ]
allBytes = [ ]
for row in csvData:
allTimes.append(row[colMap["STOP"]] - row[colMap["START"]])
allTuples.append(int(row[colMap["TUPLES_EVICTED"]]))
allBlocks.append(int(row[colMap["TUPLES_EVICTED"]]))
allBytes.append(int(row[colMap["BYTES_EVICTED"]]))
print dataFile
print " Average Time: %.2f ms" % np.mean(allTimes)
print " Average Tuples: %.2f" % np.mean(allTuples)
print " Average Blocks: %.2f" % np.mean(allBlocks)
print " Average Bytes: %.2f MB" % (np.mean(allBytes)/float(1024*1024))
print
# DEF
## ==============================================
## main
## ==============================================
if __name__ == '__main__':
matches = []
for root, dirnames, filenames in os.walk(OPT_DATA_HSTORE):
for filename in fnmatch.filter(filenames, 'evictions.csv'):
matches.append(os.path.join(root, filename))
map(computeEvictionStats, matches)
## MAIN | gpl-3.0 |
laserson/phip-stat | phip/clipped_factorization.py | 1 | 6907 | import time
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.contrib.distributions import percentile
def do_clipped_factorization(
counts_df,
rank=3,
clip_percentile=99.9,
learning_rate=1.0,
minibatch_size=1024 * 32,
patience=5,
max_epochs=1000,
normalize_to_reads_per_million=True,
log_every_seconds=10,
):
"""
Attempt to detect and correct for clone and sample batch effects by
subtracting off a learned low-rank reconstruction of the counts matrix.
The return value is the clones x samples matrix of residuals after
correcting for batch effects, with a few additional rows and columns giving
the learned background effects.
Implements the factorization:
X = AB
where X is (clones x samples), A is (clones x rank), and B is
(rank x samples)
by minimizing the "clipped" loss:
        ||minimum(X - AB, percentile(X - AB, clip_percentile))||_2 + unclipped
The minimum is taken elementwise, and ||...||_2 is the Frobenius norm.
clip_percentile is a parameter giving the percentile to clip at. The
clipping makes the factorization robust to outliers, some of which are
likely phip-seq hits.
If the above is optimized without an `unclipped` term, a few phage clones
may have all of their residuals above the truncation threshold. Once this
happens they will likely stay stuck there since they do not contribute to
the gradient. The unclipped term fixes this by providing a small nudge
toward smaller errors without truncation.
Note that "beads-only" samples are not treated in any special way here.
The optimization is performed using stochastic gradient descent (SGD) on
tensorflow.
Parameters
----------
counts_df : pandas.DataFrame
Matrix of read counts (clones x samples)
rank : int
Rank of low-dimensional background effect matrices A and B
clip_percentile : float
Elements with reconstruction errors above this percentile do not
contribute to the gradient. Aim for a lower-bound on the fraction
of entries you expect NOT to be hits.
learning_rate : float
SGD optimizer learning rate
minibatch_size : int
Number of rows per SGD minibatch
patience : int
Number of epochs without improvement in training loss to tolerate before
stopping
max_epochs : int
Maximum number of epochs
normalize_to_reads_per_million : boolean
Before computing factorization, first divide each column by the total
        number of reads for that sample and multiply by 1 million.
log_every_seconds : float
Seconds to wait before printing another optimization status update
Returns
-------
pandas.DataFrame : residuals after correcting for batch effects
In addition to the clones x samples residuals, rows and columns named
"_background_0", "_background_1", ... giving the learned background vectors
are also included.
"""
# Non-tf setup
if normalize_to_reads_per_million:
observed = (counts_df * 1e6 / counts_df.sum(0)).astype("float32")
else:
observed = counts_df.astype("float32")
(n, s) = observed.shape
if len(counts_df) < minibatch_size:
minibatch_size = len(counts_df)
# Placeholders
target = tf.placeholder(name="target", dtype="float32", shape=[None, s])
minibatch_indices = tf.placeholder(name="minibatch_indices", dtype="int32")
# Variables
a = tf.Variable(np.random.rand(n, rank), name="A", dtype="float32")
b = tf.Variable(np.random.rand(rank, s), name="B", dtype="float32")
clip_threshold = tf.Variable(observed.max().max())
# Derived quantities
reconstruction = tf.matmul(tf.gather(a, minibatch_indices), b)
differences = target - reconstruction
# unclipped_term is based only on the minimum unclipped error for each
# clone. The intuition is that we know for every clone at least one sample
# must be a non-hit (e.g. a beads only sample), and so should be well modeled
# by the background process.
unclipped_term = tf.reduce_min(tf.pow(differences, 2), axis=1)
loss = (
tf.reduce_mean(tf.pow(tf.minimum(differences, clip_threshold), 2))
+ tf.reduce_mean(unclipped_term) / s
)
update_clip_value = clip_threshold.assign(percentile(differences, clip_percentile))
# Training
train_step = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
init = tf.global_variables_initializer()
best_cost_value = None
last_log_at = 0
with tf.Session() as session:
session.run(init)
all_indices = np.arange(observed.shape[0], dtype=int)
for i in range(max_epochs):
indices = np.array(list(range(observed.shape[0])))
np.random.shuffle(indices)
for minibatch_indices_value in np.array_split(
indices, int(len(indices) / minibatch_size)
):
minibatch_indices_value = minibatch_indices_value[:minibatch_size]
if len(minibatch_indices_value) == minibatch_size:
feed_dict = {
target: observed.values[minibatch_indices_value],
minibatch_indices: minibatch_indices_value,
}
session.run(train_step, feed_dict=feed_dict)
feed_dict = {target: observed, minibatch_indices: all_indices}
(clip_threshold_value, cost_value) = session.run(
[update_clip_value, loss], feed_dict=feed_dict
)
# Update best epoch
if best_cost_value is None or cost_value < best_cost_value:
best_cost_value = cost_value
best_epoch = i
(best_a, best_b) = session.run([a, b], feed_dict=feed_dict)
# Log
if log_every_seconds and time.time() - last_log_at > log_every_seconds:
print(
"[Epoch %5d] %f, truncating at %f%s"
% (
i,
cost_value,
clip_threshold_value,
" [new best]" if i == best_epoch else "",
)
)
# Stop criterion
if i - best_epoch > patience:
print("Early stopping at epoch %d." % i)
break
background_names = ["_background_%d" % i for i in range(rank)]
best_a = pd.DataFrame(best_a, index=observed.index, columns=background_names)
best_b = pd.DataFrame(best_b, index=background_names, columns=observed.columns)
results = observed - np.matmul(best_a, best_b)
for name in background_names:
results[name] = best_a[name]
results.loc[name] = best_b.loc[name]
return results
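# Illustrative usage sketch (added for documentation; not part of the original
# phip-stat module). The synthetic counts, clone/sample names and parameter
# values below are assumptions chosen only to show the call signature.
if __name__ == "__main__":
    np.random.seed(0)
    # fake clones x samples counts drawn around a rank-2 multiplicative background
    background = np.random.gamma(2.0, 10.0, (500, 2)).dot(
        np.random.gamma(2.0, 10.0, (2, 8)))
    fake_counts = pd.DataFrame(
        np.random.poisson(background),
        index=["clone_%d" % i for i in range(500)],
        columns=["sample_%d" % i for i in range(8)])
    residuals = do_clipped_factorization(
        fake_counts, rank=2, clip_percentile=99.0, max_epochs=20,
        log_every_seconds=0)
    # rows/columns named "_background_*" hold the learned factors; the rest are
    # batch-corrected residuals in reads-per-million units
    print(residuals.shape)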
| apache-2.0 |
jlcarmic/producthunt_simulator | venv/lib/python2.7/site-packages/numpy/core/tests/test_multiarray.py | 12 | 221093 | from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from nose import SkipTest
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
assert_array_less, runstring, dec
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and suboffsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
class TestFlags(TestCase):
def setUp(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# not power of two are accessed bytewise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize)
except:
raise RuntimeError(getexception())
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=np.bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(TestCase):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
class TestDtypedescr(TestCase):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
def setUp(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
self.assertEqual(a[...], 0)
self.assertEqual(b[...], 'x')
self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
self.assertEqual(a[()], 0)
self.assertEqual(b[()], 'x')
self.assertTrue(type(a[()]) is a.dtype.type)
self.assertTrue(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[0], b)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
self.assertEqual(a, 42)
b[...] = ''
self.assertEqual(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
self.assertEqual(a, 42)
b[()] = ''
self.assertEqual(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(IndexError, assign, a, 0, 42)
self.assertRaises(IndexError, assign, b, 0, '')
self.assertRaises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
self.assertEqual(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
self.assertEqual(x[()], 6)
def test_output(self):
x = np.array(2)
self.assertRaises(ValueError, np.add, x, [1], x)
class TestScalarIndexing(TestCase):
def setUp(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
self.assertEqual(a[...], 0)
self.assertEqual(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
self.assertEqual(a[()], 0)
self.assertEqual(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
self.assertRaises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@dec.slow
def test_zeros_big(self):
        # test big arrays as they might be allocated differently by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, np.object)
assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80] * 3).dtype, np.object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, np.object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
assert_equal(np.array([2**80, long(4)]).dtype, np.object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
        those derived from object. Class Map is a mapping type indicated by
        raising a KeyError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
class TestStructured(TestCase):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
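        # (here a['a'].shape == (3, 5, 2, 2) while a.T['a'].shape == (5, 3, 2, 2))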
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that incompatible sub-array shapes don't result to broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can reorder fields and change byte
# order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
#check that broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
class TestBool(TestCase):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
self.assertTrue(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
self.assertTrue(a1 is b1)
self.assertTrue(np.array([True])[0] is a1)
self.assertTrue(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=np.bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
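        # Enumerate every mask over the low `power` bits, build the matching
        # boolean array of the given length, and compare count_nonzero with the
        # Python-level sum, also after mangling the bytes that back the Trues.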
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=np.bool)
c = builtins.sum(l)
self.assertEqual(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
self.assertEqual(np.count_nonzero(a), c)
av *= 4
self.assertEqual(np.count_nonzero(a), c)
av[av != 0] = 0xFF
self.assertEqual(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@dec.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=np.bool)[o+1:]
a[:o] = True
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=np.bool)[o+1:]
a[:o] = False
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the lessthan comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall back to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
            msg = "record sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
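        # (this picks the non-native byte order, presumably so the sort has to
        # go through the byte-swapped compare path)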
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall back to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
        # but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
        # using None is known to fail at this point
        #assert_equal(a.copy().argsort(axis=None), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
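        # (the keys are given in decreasing order, so an implementation that
        # reuses the previous hit as a lower bound has to reset it between keys)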
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
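        # ('?' adds the bool type code and 'O' the object type code)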
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
        np.random.shuffle(a)  # shuffle the data, not the soon-overwritten sorter
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
            assert_equal(np.argpartition(a, 0, axis=axis),
                         np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
        assert_equal(np.argpartition(a, 0, axis=None),
                     np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
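        # 'introselect' is currently the only selection algorithm exposed
        # through the kind argument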
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones((1))
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones((50))
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange((49))
self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange((47))[::-1]
self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median of 3 killer, O(n^2) on pure median 3 pivot quickselect
# exercises the median of median of 5 code used to keep O(n)
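            # (the rolled range is the "killer" input: simple first/middle/last
            # pivot sampling keeps picking bad pivots, so staying O(n) relies on
            # the median-of-medians fallback)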
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange((47)) % 7
tgt = np.sort(np.arange((47)) % 7)
np.random.shuffle(d)
for i in range(d.size):
self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(ValueError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(ValueError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(ValueError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(ValueError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = self.assertTrue
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
                for row in d1:  # shuffle each row; a bare map() is a no-op on Python 3
                    np.random.shuffle(row)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
self.assertEqual(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i,:], np.array([i] * d1.shape[0],
dtype=dt))
# array_less does not seem to work right
at((p[:i,:] <= p[i,:]).all(),
msg="%d: %r <= %r" % (i, p[i,:], p[:i,:]))
at((p[i + 1:,:] > p[i,:]).all(),
                       msg="%d: %r < %r" % (i, p[i,:], p[i + 1:,:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None,:]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
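        # Check that for every index in kth all elements before it (since the
        # previous kth) are strictly smaller than d[k] and all elements from
        # d[k] onwards are >= d[k].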
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
        for row in d1:  # shuffle each row; a bare map() is a no-op on Python 3
            np.random.shuffle(row)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6,7,3,2,9,0]
p = np.argpartition(d,1)
self.assert_partitioned(np.array(d)[p],[1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_override(self):
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
assert_(sys.getrefcount(a) < 50)
def test_trace(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.trace(), 15)
assert_equal(a.trace(0), 15)
assert_equal(a.trace(1), 18)
assert_equal(a.trace(-1), 13)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.trace(), [6, 8])
assert_equal(b.trace(0), [6, 8])
assert_equal(b.trace(1), [2, 3])
assert_equal(b.trace(-1), [4, 5])
assert_equal(b.trace(0, 0, 1), [6, 8])
assert_equal(b.trace(0, 0, 2), [5, 9])
assert_equal(b.trace(0, 1, 2), [3, 11])
assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
def test_trace_subclass(self):
        # The class would need to override trace to ensure single-element
# output also has the right subclass.
class MyArray(np.ndarray):
pass
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
        assert_(isinstance(t, MyArray))
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Test simple 1-d copy behaviour:
a = np.arange(10)[::2]
assert_(a.ravel('K').flags.owndata)
assert_(a.ravel('C').flags.owndata)
assert_(a.ravel('F').flags.owndata)
# Not contiguous and 1-sized axis with non matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
# contiguous and 1-sized axis with non matching stride works:
a = np.arange(2**3)
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel(order='K'), np.arange(2**3))
# Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
assert_(a.ravel(order='C').flags.owndata)
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
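        # (bogus strides are harmless on a 1-element array: no stride is ever
        # used to step to another element)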
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
# Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(ValueError, a.swapaxes, -5, 0)
assert_raises(ValueError, a.swapaxes, 4, 0)
assert_raises(ValueError, a.swapaxes, 0, -5)
assert_raises(ValueError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
# d = input.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
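        # (temporary elision may reuse the refcount-1 array's buffer for the
        # result; the checks below make sure the caller-visible array is left
        # untouched)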
from numpy.core.multiarray_tests import incref_elide
d = np.ones(5)
orig, res = incref_elide(d)
        # the returned original must not have been modified by an elided
        # in-place operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
        # Scanning whether the refcount == 1 object is on the Python stack (to
        # check that we were called directly from Python) is flawed: the object
        # may still be above the stack pointer, and we have no access to the
        # top of the stack.
#
        # def incref_elide_l(l):
        #     return l[4] + l[4]  # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide_l
        # padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(5)]
res = incref_elide_l(l)
        # the returned original must not have been modified by an in-place operation
assert_array_equal(l[4], np.ones(5))
assert_array_equal(res, l[4] + l[4])
def test_ufunc_override_rop_precedence(self):
# Check that __rmul__ and other right-hand operations have
# precedence over __numpy_ufunc__
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
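        # each ops entry maps an op name to (reflected op name, matching ufunc,
        # whether an in-place variant exists)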
ops = {
'__add__': ('__radd__', np.add, True),
'__sub__': ('__rsub__', np.subtract, True),
'__mul__': ('__rmul__', np.multiply, True),
'__truediv__': ('__rtruediv__', np.true_divide, True),
'__floordiv__': ('__rfloordiv__', np.floor_divide, True),
'__mod__': ('__rmod__', np.remainder, True),
'__divmod__': ('__rdivmod__', None, False),
'__pow__': ('__rpow__', np.power, True),
'__lshift__': ('__rlshift__', np.left_shift, True),
'__rshift__': ('__rrshift__', np.right_shift, True),
'__and__': ('__rand__', np.bitwise_and, True),
'__xor__': ('__rxor__', np.bitwise_xor, True),
'__or__': ('__ror__', np.bitwise_or, True),
'__ge__': ('__le__', np.less_equal, False),
'__gt__': ('__lt__', np.less, False),
'__le__': ('__ge__', np.greater_equal, False),
'__lt__': ('__gt__', np.greater, False),
'__eq__': ('__eq__', np.equal, False),
'__ne__': ('__ne__', np.not_equal, False),
}
class OtherNdarraySubclass(np.ndarray):
pass
class OtherNdarraySubclassWithOverride(np.ndarray):
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def check(op_name, ndsubclass):
rop_name, np_op, has_iop = ops[op_name]
if has_iop:
iop_name = '__i' + op_name[2:]
iop = getattr(operator, iop_name)
if op_name == "__divmod__":
op = divmod
else:
op = getattr(operator, op_name)
# Dummy class
def __init__(self, *a, **kw):
pass
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def __op__(self, *other):
return "op"
def __rop__(self, *other):
return "rop"
if ndsubclass:
bases = (np.ndarray,)
else:
bases = (object,)
dct = {'__init__': __init__,
'__numpy_ufunc__': __numpy_ufunc__,
op_name: __op__}
if op_name != rop_name:
dct[rop_name] = __rop__
cls = type("Rop" + rop_name, bases, dct)
            # Check behavior against both bare ndarray objects and
            # ndarray subclasses with and without their own override
obj = cls((1,), buffer=np.ones(1,))
arr_objs = [np.array([1]),
np.array([2]).view(OtherNdarraySubclass),
np.array([3]).view(OtherNdarraySubclassWithOverride),
]
for arr in arr_objs:
err_msg = "%r %r" % (op_name, arr,)
# Check that ndarray op gives up if it sees a non-subclass
if not isinstance(obj, arr.__class__):
assert_equal(getattr(arr, op_name)(obj),
NotImplemented, err_msg=err_msg)
# Check that the Python binops have priority
assert_equal(op(obj, arr), "op", err_msg=err_msg)
if op_name == rop_name:
assert_equal(op(arr, obj), "op", err_msg=err_msg)
else:
assert_equal(op(arr, obj), "rop", err_msg=err_msg)
# Check that Python binops have priority also for in-place ops
if has_iop:
assert_equal(getattr(arr, iop_name)(obj),
NotImplemented, err_msg=err_msg)
if op_name != "__pow__":
# inplace pow requires the other object to be
# integer-like?
assert_equal(iop(arr, obj), "rop", err_msg=err_msg)
# Check that ufunc call __numpy_ufunc__ normally
if np_op is not None:
assert_raises(AssertionError, np_op, arr, obj,
err_msg=err_msg)
assert_raises(AssertionError, np_op, obj, arr,
err_msg=err_msg)
# Check all binary operations
for op_name in sorted(ops.keys()):
yield check, op_name, True
yield check, op_name, False
def test_ufunc_override_rop_simple(self):
        # Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
# Check parts of the binary op overriding behavior in an
# explicit test case that is easier to understand.
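        # SomeClass: plain object with __numpy_ufunc__ and reflected ops;
        # SomeClass2: ndarray subclass with a selective __numpy_ufunc__;
        # SomeClass3: SomeClass2 plus its own __rsub__.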
class SomeClass(object):
def __numpy_ufunc__(self, *a, **kw):
return "ufunc"
def __mul__(self, other):
return 123
def __rmul__(self, other):
return 321
def __rsub__(self, other):
return "no subs for me"
def __gt__(self, other):
return "yep"
def __lt__(self, other):
return "nope"
class SomeClass2(SomeClass, np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if ufunc is np.multiply or ufunc is np.bitwise_and:
return "ufunc"
else:
inputs = list(inputs)
inputs[i] = np.asarray(self)
func = getattr(ufunc, method)
r = func(*inputs, **kw)
if 'out' in kw:
return r
else:
x = self.__class__(r.shape, dtype=r.dtype)
x[...] = r
return x
class SomeClass3(SomeClass2):
def __rsub__(self, other):
return "sub for me"
arr = np.array([0])
obj = SomeClass()
obj2 = SomeClass2((1,), dtype=np.int_)
obj2[0] = 9
obj3 = SomeClass3((1,), dtype=np.int_)
obj3[0] = 4
# obj is first, so should get to define outcome.
assert_equal(obj * arr, 123)
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
assert_equal(arr * obj, 321)
# obj is second, but has __numpy_ufunc__ and defines __rsub__.
assert_equal(arr - obj, "no subs for me")
# obj is second, but has __numpy_ufunc__ and defines __lt__.
assert_equal(arr > obj, "nope")
# obj is second, but has __numpy_ufunc__ and defines __gt__.
assert_equal(arr < obj, "yep")
# Called as a ufunc, obj.__numpy_ufunc__ is used.
assert_equal(np.multiply(arr, obj), "ufunc")
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
arr *= obj
assert_equal(arr, 321)
# obj2 is an ndarray subclass, so CPython takes care of the same rules.
assert_equal(obj2 * arr, 123)
assert_equal(arr * obj2, 321)
assert_equal(arr - obj2, "no subs for me")
assert_equal(arr > obj2, "nope")
assert_equal(arr < obj2, "yep")
# Called as a ufunc, obj2.__numpy_ufunc__ is called.
assert_equal(np.multiply(arr, obj2), "ufunc")
# Also when the method is not overridden.
assert_equal(arr & obj2, "ufunc")
arr *= obj2
assert_equal(arr, 321)
obj2 += 33
assert_equal(obj2[0], 42)
assert_equal(obj2.sum(), 42)
assert_(isinstance(obj2, SomeClass2))
# Obj3 is subclass that defines __rsub__. CPython calls it.
assert_equal(arr - obj3, "sub for me")
assert_equal(obj2 - obj3, "sub for me")
# obj3 is a subclass that defines __rmul__. CPython calls it.
assert_equal(arr * obj3, 321)
# But not here, since obj3.__rmul__ is obj2.__rmul__.
assert_equal(obj2 * obj3, 123)
# And of course, here obj3.__mul__ should be called.
assert_equal(obj3 * obj2, 123)
# obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__.
# (and both are just ndarray.__radd__); see #4815.
res = obj2 + obj3
assert_equal(res, 46)
assert_(isinstance(res, SomeClass2))
# Since obj3 is a subclass, it should have precedence, like CPython
# would give, even though obj2 has __numpy_ufunc__ and __radd__.
# See gh-4815 and gh-5747.
res = obj3 + obj2
assert_equal(res, 46)
assert_(isinstance(res, SomeClass3))
def test_ufunc_override_normalize_signature(self):
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
# gh-5674
class SomeClass(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
class TestCAPI(TestCase):
def test_IsPythonScalar(self):
from numpy.core.multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(TestCase):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
self.assertTrue(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
self.assertTrue(isinstance(x[0], int))
self.assertTrue(type(x[0, ...]) is np.ndarray)
class TestPickling(TestCase):
def test_roundtrip(self):
import pickle
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
for a in DATA:
assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
def _loads(self, obj):
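        # pickles written by Python 2 must be decoded as latin1 when loaded on Python 3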
if sys.version_info[0] >= 3:
return np.loads(obj, encoding='latin1')
else:
return np.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_subarray_int_shape(self):
s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(asbytes(s))
assert_equal(a, p)
class TestFancyIndexing(TestCase):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:,:, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:,:, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([sixu("This"), sixu("is"), sixu("example")])
g2 = np.array([sixu("This"), sixu("was"), sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
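    # Each nan_arr entry is (array, expected argmax index): the first NaN wins,
    # while NaT values are ignored.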
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 3),
#(['zz', 'a', 'aa', 'a'], 0),
#(['aa', 'z', 'zz', 'a'], 2),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
        # these could possibly be relaxed (the shapes above used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
        # check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
        # check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
class TestArgmin(TestCase):
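    # Each nan_arr entry is (array, expected argmin index): the first NaN wins,
    # while NaT values are ignored.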
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 0),
#(['zz', 'a', 'aa', 'a'], 1),
#(['aa', 'z', 'zz', 'a'], 3),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
        # these could possibly be relaxed (the shapes above used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2,3))
        # check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
        # check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
class TestMinMax(TestCase):
def test_scalar(self):
assert_raises(ValueError, np.amax, 1, 1)
assert_raises(ValueError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(TestCase):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(TestCase):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
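        # exercise every scalar type in the group in both native and swapped byte order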
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_(np.all(x[mask] == T(val)))
assert_(x.dtype == T)
def test_ip_types(self):
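        # nose-style generator test: run tst_basic for every supported scalar
        # type and several fill values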
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T), T, mask, val
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
def test_masked_array(self):
## x = np.array([1,2,3])
## z = np.ma.array(x,mask=[True,False,False])
## np.putmask(z,[True,True,True],3)
pass
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T)
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(TestCase):
def test_basic(self):
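        # np.lexsort sorts by the last key (a) first and breaks ties with b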
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setUp(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(np.complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
# NB. str imbues less precision
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def _check_from(self, s, value, **kw):
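        # parse s with both np.fromstring and np.fromfile and compare to value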
y = np.fromstring(asbytes(s), **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = '1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.0,3.51,4.0')
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
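        # the parsing and formatting tests above must also pass under a foreign locale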
in_foreign_locale(self.test_numbers)()
in_foreign_locale(self.test_nan)()
in_foreign_locale(self.test_inf)()
in_foreign_locale(self.test_counted_string)()
in_foreign_locale(self.test_ascii)()
in_foreign_locale(self.test_malformed)()
in_foreign_locale(self.test_tofile_sep)()
in_foreign_locale(self.test_tofile_format)()
class TestFromBuffer(object):
def tst_basic(self, buffer, expected, kwargs):
assert_array_equal(np.frombuffer(buffer,**kwargs), expected)
def test_ip_basic(self):
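        # round-trip through tobytes/np.frombuffer for both byte orders and
        # several dtypes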
for byteorder in ['<', '>']:
for dtype in [float, int, np.complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
yield self.tst_basic, buf, x.flat, {'dtype':dt}
def test_empty(self):
yield self.tst_basic, asbytes(''), np.array([]), {}
class TestFlat(TestCase):
def setUp(self):
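        # a and its slice b are read-only; a0 and b0 are writable counterparts
        # sharing the same data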
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.a.flat[12] == 12.0
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.b.flat[4] == 12.0
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert c.flags.writeable is False
assert d.flags.writeable is False
assert e.flags.writeable is True
assert f.flags.writeable is True
assert c.flags.updateifcopy is False
assert d.flags.updateifcopy is False
assert e.flags.updateifcopy is False
assert f.flags.updateifcopy is True
assert f.base is self.b0
class TestResize(TestCase):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
        assert_array_equal(x.flat[9:], 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
self.assertRaises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
    def test_invalid_arguments(self):
self.assertRaises(TypeError, np.eye(3).resize, 'hi')
self.assertRaises(ValueError, np.eye(3).resize, -1)
self.assertRaises(TypeError, np.eye(3).resize, order=1)
self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
class TestRecord(TestCase):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
if sys.version_info[0] >= 3:
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
dt = np.dtype([((asbytes('a'), 'b'), int)])
assert_raises(ValueError, dt.__getitem__, asbytes('a'))
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, asbytes('a'))
y = x[0]
assert_raises(IndexError, y.__getitem__, asbytes('a'))
else:
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = unicode('b')
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
def test_unicode_field_names(self):
# Unicode field names are not allowed on Py2
title = unicode('b')
assert_raises(TypeError, np.dtype, [(title, int)])
assert_raises(TypeError, np.dtype, [(('a', title), int)])
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1)
assert_raises(IndexError, a.__getitem__, asbytes('f1'))
assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(ValueError, b[0].__setitem__, fnn, 1)
assert_raises(ValueError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple Subfields
fn2 = func('f2')
b[fn2] = 3
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# view of subfield view/copy
assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2))
view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
def test_field_names_deprecation(self):
def collect_warnings(f, *args, **kwargs):
with warnings.catch_warnings(record=True) as log:
warnings.simplefilter("always")
f(*args, **kwargs)
return [w.category for w in log]
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
a['f1'][0] = 1
a['f2'][0] = 2
a['f3'][0] = (3,)
b = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
b['f1'][0] = 1
b['f2'][0] = 2
b['f3'][0] = (3,)
# All the different functions raise a warning, but not an error, and
# 'a' is not modified:
assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
[FutureWarning])
assert_equal(a, b)
# Views also warn
subset = a[['f1', 'f2']]
subset_view = subset.view()
assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
[FutureWarning])
# But the write goes through:
assert_equal(subset['f1'][0], 10)
# Only one warning per multiple field indexing, though (even if there
# are multiple views involved):
assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
self.assertTrue(hash(a[0]) == hash(a[1]))
self.assertTrue(hash(a[0]) == hash(b[0]))
self.assertTrue(hash(a[0]) != hash(b[1]))
self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
self.assertRaises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
class TestView(TestCase):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(TestCase):
funcs = [_mean, _var, _std]
def setUp(self):
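        # rmat: real floats, cmat: complex, omat: object array of Decimals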
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
# FIXME:
# this needs definition as there are lots places along the line
# where type casting may take place.
#for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
    def test_ddof(self):
        for f in [_var]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * dim
                res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
                assert_almost_equal(res, tgt)
        for f in [_std]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * np.sqrt(dim)
                res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
                assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
class TestVdot(TestCase):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=np.bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
def test_vdot_uncontiguous(self):
for size in [2, 1000]:
# Different sizes match different branches in vdot.
a = np.zeros((size, 2, 2))
b = np.zeros((size, 2, 2))
a[:, 0, 0] = np.arange(size)
b[:, 0, 0] = np.arange(size) + 1
# Make a and b uncontiguous:
a = a[..., 0]
b = b[..., 0]
assert_equal(np.vdot(a, b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy()),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy(), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy('F'), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy('F')),
np.vdot(a.flatten(), b.flatten()))
class TestDot(TestCase):
def setUp(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[ 0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
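        # repeated calls writing into the same output array must not leak
        # references to r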
for i in range(12):
dot(f, v, r)
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_dot_scalar_and_matrix_of_objects(self):
# Ticket #2469
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
def test_dot_override(self):
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
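            # over-allocate, then slice so the data pointer has the requested
            # byte alignment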
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
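# Illustrative sketch, not part of the upstream NumPy suite: a minimal
# demonstration of the 3-argument np.dot form exercised above. The `out`
# array must already have the matching shape and dtype, and the same object
# is returned. Only public NumPy APIs are assumed; the leading underscore
# keeps test runners from collecting it.
def _example_dot_with_out():
    import numpy as np
    a = np.arange(6, dtype=np.float64).reshape(2, 3)
    b = np.arange(12, dtype=np.float64).reshape(3, 4)
    out = np.empty((2, 4), dtype=np.float64)
    res = np.dot(a, b, out=out)
    # the product is written into `out`, and `out` itself is handed back
    assert res is out
    assert np.allclose(out, a.dot(b))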
class MatmulCommon():
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_vector_vector_values(self):
vec = np.array([1, 2])
tgt = 5
for dt in self.types[1:]:
v1 = vec.astype(dt)
res = self.matmul(v1, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
def test_numpy_ufunc_override(self):
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
class A(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A([1, 2])
b = B([1, 2])
c = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
assert_raises(TypeError, self.matmul, b, c)
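# Illustrative sketch, not part of the upstream NumPy suite: np.matmul
# broadcasts over leading "stack" dimensions (unlike np.dot) and strips the
# inserted length-1 dimension from vector operands, which is the behaviour
# the shape tests above rely on. Hedged usage sketch, public APIs only.
def _example_matmul_broadcasting():
    import numpy as np
    stack = np.arange(2 * 2 * 2).reshape(2, 2, 2)   # two stacked 2x2 matrices
    single = np.eye(2)
    res = np.matmul(stack, single)                  # (2, 2, 2) @ (2, 2) -> (2, 2, 2)
    assert res.shape == (2, 2, 2)
    assert np.array_equal(res, stack)               # multiplying by the identity
    vec = np.ones(2)
    assert np.matmul(stack, vec).shape == (2, 2)    # appended vector dim removed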
class TestMatmul(MatmulCommon, TestCase):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((2, 2), dtype=np.float)
b = np.ones((2, 2), dtype=np.float)
tgt = np.full((2,2), 2, dtype=np.float)
# test as positional argument
msg = "out positional argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with a disallowed type cast (safe casting)

# einsum and cblas raise different error types, so
# use Exception.
msg = "out argument with illegal cast"
out = np.zeros((2, 2), dtype=np.int32)
assert_raises(Exception, self.matmul, a, b, out=out)
# skip following tests for now, cblas does not allow non-contiguous
# outputs and consistency with dot would require same type,
# dimensions, subtype, and c_contiguous.
# test out with allowed type cast
# msg = "out argument with allowed cast"
# out = np.zeros((2, 2), dtype=np.complex128)
# self.matmul(a, b, out=out)
# assert_array_equal(out, tgt, err_msg=msg)
# test out non-contiguous
# msg = "out argument with non-contiguous layout"
# c = np.zeros((2, 2, 2), dtype=np.float)
# self.matmul(a, b, out=c[..., 0])
# assert_array_equal(c, tgt, err_msg=msg)
if sys.version_info[:2] >= (3, 5):
class TestMatmulOperator(MatmulCommon, TestCase):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A(object):
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
# and nudge people to writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
# we avoid writing the token `exec` so as not to crash python 2's
# parser
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
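# Illustrative sketch, not part of the upstream NumPy suite: since in-place
# matmul is rejected (see test_matmul_inplace above), the usual workarounds
# are rebinding the name or writing the product back into the existing
# buffer. Hedged sketch using only public NumPy APIs.
def _example_matmul_inplace_workaround():
    import numpy as np
    a = np.array([[1., 2.], [3., 4.]])
    b = np.array([[0., 1.], [1., 0.]])   # swaps the columns
    a = np.matmul(a, b)                  # rebind the name: allocates a new array
    c = np.array([[1., 2.], [3., 4.]])
    c[...] = np.matmul(c, b)             # write the product back into c's buffer
    assert np.array_equal(a, c)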
class TestInner(TestCase):
def test_inner_scalar_and_matrix_of_objects(self):
# Ticket #4482
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.inner(arr, 3), desired)
assert_equal(np.inner(3, arr), desired)
def test_vecself(self):
# Ticket 844.
# Inner product of a vector with itself segfaults or gives a
# meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
def test_inner_product_with_various_contiguities(self):
# github issue 6532
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
# check an inner product involving a matrix transpose
A = np.array([[1, 2], [3, 4]], dtype=dt)
B = np.array([[1, 3], [2, 4]], dtype=dt)
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
assert_equal(np.inner(B, C), desired)
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
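# Illustrative sketch, not part of the upstream NumPy suite: np.inner sums
# products over the *last* axis of both arguments, which is why the tests
# above can feed it transposed and reversed views. Public APIs only.
def _example_inner_usage():
    import numpy as np
    a = np.array([1., 2., 3.])
    b = np.array([4., 5., 6.])
    assert np.inner(a, b) == 32.0                    # 1*4 + 2*5 + 3*6
    M = np.arange(6.).reshape(2, 3)
    # for 2-d inputs, inner(M, N)[i, j] == sum(M[i, :] * N[j, :])
    assert np.array_equal(np.inner(M, M), M.dot(M.T))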
class TestSummarization(TestCase):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
assert_(str(A) == strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_(repr(A) == reprA)
def test_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ..., 498 499 500]\n' \
' [ 501 502 503 ..., 999 1000 1001]]'
assert_(str(A) == strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_(repr(A) == reprA)
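# Illustrative sketch, not part of the upstream NumPy suite: the "..."
# summarization checked above kicks in once an array exceeds the print
# threshold, which can be tuned via np.set_printoptions. The default
# threshold is version dependent, so it is set explicitly here.
def _example_summarization_threshold():
    import numpy as np
    saved = np.get_printoptions()
    try:
        np.set_printoptions(threshold=10)
        assert '...' in repr(np.arange(100))       # large arrays are summarized
        assert '...' not in repr(np.arange(5))     # small arrays print in full
    finally:
        np.set_printoptions(**saved)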
class TestChoose(TestCase):
def setUp(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
# TODO: test for multidimensional
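# Illustrative sketch, not part of the upstream NumPy suite: np.choose picks,
# element by element, from a sequence of choice arrays and broadcasts the
# index array against the choices, which is what the broadcast tests above
# rely on. Public APIs only.
def _example_choose_usage():
    import numpy as np
    index = np.array([0, 1, 0])
    choices = (np.array([10, 20, 30]), np.array([40, 50, 60]))
    assert np.array_equal(np.choose(index, choices), [10, 50, 30])
    # a scalar choice broadcasts against the array-valued one
    assert np.array_equal(np.choose(index, (0, np.array([1, 2, 3]))), [0, 2, 0])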
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
class TestNeighborhoodIter(TestCase):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple2d(self):
self._test_simple2d(np.float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
def _test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['mirror'])
assert_array_equal(l, r)
def test_mirror2d(self):
self._test_mirror2d(np.float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
# Simple, 1d tests
def _test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple_float(self):
self._test_simple(np.float)
def test_simple_object(self):
self._test_simple(Decimal)
# Test mirror modes
def _test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
self.assertTrue([i.dtype == dt for i in l])
assert_array_equal(l, r)
def test_mirror(self):
self._test_mirror(np.float)
def test_mirror_object(self):
self._test_mirror(Decimal)
# Circular mode
def _test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
def test_circular(self):
self._test_circular(np.float)
def test_circular_object(self):
self._test_circular(Decimal)
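# Illustrative sketch, not part of the upstream NumPy suite: the neighborhood
# iterator above is driven through a private C test helper, so it cannot be
# demonstrated directly from Python. Its padding modes correspond roughly to
# public np.pad modes ('zero'/'constant' -> 'constant', 'mirror' ->
# 'symmetric', 'circular' -> 'wrap'); this is only an analogy, not the same
# code path.
def _example_padding_modes_analogy():
    import numpy as np
    x = np.array([1., 2., 3., 4., 5.])
    assert np.array_equal(np.pad(x, 2, mode='constant'),
                          [0., 0., 1., 2., 3., 4., 5., 0., 0.])
    assert np.array_equal(np.pad(x, 2, mode='symmetric'),
                          [2., 1., 1., 2., 3., 4., 5., 5., 4.])
    assert np.array_equal(np.pad(x, 2, mode='wrap'),
                          [4., 5., 1., 2., 3., 4., 5., 1., 2.])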
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
[0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
# Stacking zero on top of circular
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking circular on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking circular on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking circular on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking circular on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType(object):
def test_usigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_usigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_usigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_usigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
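# Illustrative sketch, not part of the upstream NumPy suite: np.min_scalar_type
# returns the smallest dtype that can hold a given scalar without demotion,
# falling back to object for values outside every integer range, as the tests
# above check. Public APIs only.
def _example_min_scalar_type_usage():
    import numpy as np
    assert np.min_scalar_type(255) == np.dtype('uint8')
    assert np.min_scalar_type(-1) == np.dtype('int8')
    assert np.min_scalar_type(3.1) == np.dtype('float16')
    assert np.min_scalar_type(2**64) == np.dtype('O')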
if sys.version_info[:2] == (2, 6):
from numpy.core.multiarray import memorysimpleview as memoryview
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
if wanted[-1][0] == '':
names = list(dt.names)
names[-1] = ''
dt.names = tuple(names)
assert_equal(_dtype_from_pep3118(spec), dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
# Native padding should also work for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('ix', [('f0', 'i'), ('', VV(1))])
self._check('ixx', [('f0', 'i'), ('', VV(2))])
self._check('ixxx', [('f0', 'i'), ('', VV(3))])
self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
self._check('i7x', [('f0', 'i'), ('', VV(7))])
self._check('^ix', [('f0', 'i'), ('', 'V1')])
self._check('^ixx', [('f0', 'i'), ('', 'V2')])
self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
self._check('^i7x', [('f0', 'i'), ('', 'V7')])
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
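# Illustrative sketch, not part of the upstream NumPy suite: the PEP 3118
# checks above revolve around struct padding, which is also visible from
# Python through aligned structured dtypes: with align=True the field offsets
# and the total itemsize honour the C alignment rules. The concrete alignment
# value is platform dependent, so it is read from the dtype itself.
def _example_aligned_struct_padding():
    import numpy as np
    packed = np.dtype([('a', 'i1'), ('b', 'i4')])
    aligned = np.dtype([('a', 'i1'), ('b', 'i4')], align=True)
    assert packed.itemsize == 5                    # no padding inserted
    align = np.dtype('i4').alignment               # typically 4
    assert aligned.fields['b'][1] == align         # 'b' starts on a boundary
    assert aligned.itemsize % align == 0           # trailing padding added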
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
def test_relaxed_strides(self):
# Test that relaxed strides are converted to non-relaxed
c = np.ones((1, 10, 10), dtype='i8')
# Check for NPY_RELAXED_STRIDES_CHECKING:
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
assert memoryview(c).strides == (800, 80, 8)
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert memoryview(fortran).strides == (8, 80, 800)
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
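# Illustrative sketch, not part of the upstream NumPy suite: the buffer
# protocol round trip used throughout the class above in a nutshell —
# exporting through memoryview and re-wrapping with np.asarray shares memory,
# while np.array makes an independent copy. Public APIs only.
def _example_buffer_roundtrip():
    import numpy as np
    x = np.arange(5, dtype='i4')
    view = np.asarray(memoryview(x))     # zero-copy view on x's buffer
    copy = np.array(memoryview(x))       # independent copy
    view[0] = 42
    assert x[0] == 42                    # the view aliases x
    assert copy[0] == 0                  # the copy does not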
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
"""ticket #2046, should not seqfault, raise AttributeError"""
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_array_interface():
# Test scalar coercion within the array interface
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr': '=f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
assert_equal(np.array(f), 0.5)
assert_equal(np.array([f]), [0.5])
assert_equal(np.array([f, f]), [0.5, 0.5])
assert_equal(np.array(f).dtype, np.dtype('=f8'))
# Test various shape definitions
f.iface['shape'] = ()
assert_equal(np.array(f), 0.5)
f.iface['shape'] = None
assert_raises(TypeError, np.array, f)
f.iface['shape'] = (1, 1)
assert_equal(np.array(f), [[0.5]])
f.iface['shape'] = (2,)
assert_raises(ValueError, np.array, f)
# test scalar with no shape
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
# needs to be larger than the limit of the small memory cache in ctors.c
a = np.zeros(1000)
del a
test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
test_inplace_increment(a, index, vals)
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
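# Illustrative sketch, not part of the upstream NumPy suite:
# test_inplace_increment above goes through a private C helper. The public
# counterpart for unbuffered in-place accumulation with repeated indices is
# np.add.at; a plain fancy-indexed += applies each repeated index only once.
# Hedged analogy, not the same code path.
def _example_unbuffered_inplace_add():
    import numpy as np
    a = np.zeros(4)
    idx = np.array([0, 0, 1])
    a[idx] += 1                     # buffered: index 0 is incremented once
    assert np.array_equal(a, [1., 1., 0., 0.])
    b = np.zeros(4)
    np.add.at(b, idx, 1)            # unbuffered: index 0 is incremented twice
    assert np.array_equal(b, [2., 1., 0., 0.])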
class TestAsCArray(TestCase):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class TestConversion(TestCase):
def test_array_scalar_relational_operation(self):
# All integer types
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
# Unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
class TestWhere(TestCase):
def test_basic(self):
dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=np.bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0,0,1,0,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
# minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(np.int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
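# Illustrative sketch, not part of the upstream NumPy suite: the
# three-argument np.where broadcasts the condition against both branches and
# picks elements from either side, which is what the dtype-mix and
# broadcasting tests above stress. Public APIs only.
def _example_where_usage():
    import numpy as np
    cond = np.array([True, False, True, False])
    a = np.arange(4)
    assert np.array_equal(np.where(cond, a, -1), [0, -1, 2, -1])   # scalar branch broadcasts
    grid = np.arange(6).reshape(2, 3)
    # a (2, 1) condition broadcasts along the second axis of the (2, 3) grid
    assert np.array_equal(np.where(np.array([True, False])[:, None], grid, 0),
                          [[0, 1, 2], [0, 0, 0]])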
class TestSizeOf(TestCase):
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(TestCase):
def test_arrays_not_hashable(self):
x = np.ones(3)
assert_raises(TypeError, hash, x)
def test_collections_hashable(self):
x = np.array([])
self.assertFalse(isinstance(x, collections.Hashable))
class TestArrayPriority(TestCase):
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
binary_ops = [
op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
op.ge, op.lt, op.le, op.ne, op.eq
]
if sys.version_info[0] < 3:
binary_ops.append(op.div)
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other(object):
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(TestCase):
def test_empty_bstring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0 \0'
self.assertTrue(a)
class TestUnicodeArrayNonzero(TestCase):
def test_empty_ustring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
self.assertTrue(a)
if __name__ == "__main__":
run_module_suite()
| mit |